Skip to content

Commit

Permalink
Merge pull request #21 from AssemblyAI/fern-bot/03-18-2024-0809PM
Browse files Browse the repository at this point in the history
🌿 Fern Regeneration -- March 18, 2024
  • Loading branch information
Swimburger committed Mar 18, 2024
2 parents 6eab2c4 + fe75ddd commit 1886494
Show file tree
Hide file tree
Showing 9 changed files with 194 additions and 26 deletions.
2 changes: 1 addition & 1 deletion assemblyai.gemspec
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ require_relative "lib/gemconfig"

Gem::Specification.new do |spec|
spec.name = "assemblyai"
spec.version = "1.0.0-beta.3"
spec.version = "1.0.0-beta.4"
spec.authors = AssemblyAI::Gemconfig::AUTHORS
spec.email = AssemblyAI::Gemconfig::EMAIL
spec.summary = AssemblyAI::Gemconfig::SUMMARY
Expand Down
16 changes: 8 additions & 8 deletions lib/assemblyai/lemur/client.rb
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def initialize(request_client:)
# Use either transcript_ids or input_text as input into LeMUR.
# @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
# Use either transcript_ids or input_text as input into LeMUR.
# @param context [String, Hash{String => String}] Context to provide the model. This can be a string or a free-form JSON value.
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
# @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
# Defaults to "default".
# @param max_output_size [Integer] Max output size in tokens, up to 4000
Expand Down Expand Up @@ -66,7 +66,7 @@ def task(prompt:, transcript_ids: nil, input_text: nil, context: nil, final_mode
# Use either transcript_ids or input_text as input into LeMUR.
# @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
# Use either transcript_ids or input_text as input into LeMUR.
# @param context [String, Hash{String => String}] Context to provide the model. This can be a string or a free-form JSON value.
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
# @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
# Defaults to "default".
# @param max_output_size [Integer] Max output size in tokens, up to 4000
Expand Down Expand Up @@ -104,7 +104,7 @@ def summary(transcript_ids: nil, input_text: nil, context: nil, final_model: nil
# Use either transcript_ids or input_text as input into LeMUR.
# @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
# Use either transcript_ids or input_text as input into LeMUR.
# @param context [String, Hash{String => String}] Context to provide the model. This can be a string or a free-form JSON value.
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
# @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
# Defaults to "default".
# @param max_output_size [Integer] Max output size in tokens, up to 4000
Expand Down Expand Up @@ -146,7 +146,7 @@ def question_answer(questions:, transcript_ids: nil, input_text: nil, context: n
# Use either transcript_ids or input_text as input into LeMUR.
# @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
# Use either transcript_ids or input_text as input into LeMUR.
# @param context [String, Hash{String => String}] Context to provide the model. This can be a string or a free-form JSON value.
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
# @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
# Defaults to "default".
# @param max_output_size [Integer] Max output size in tokens, up to 4000
Expand Down Expand Up @@ -211,7 +211,7 @@ def initialize(request_client:)
# Use either transcript_ids or input_text as input into LeMUR.
# @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
# Use either transcript_ids or input_text as input into LeMUR.
# @param context [String, Hash{String => String}] Context to provide the model. This can be a string or a free-form JSON value.
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
# @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
# Defaults to "default".
# @param max_output_size [Integer] Max output size in tokens, up to 4000
Expand Down Expand Up @@ -251,7 +251,7 @@ def task(prompt:, transcript_ids: nil, input_text: nil, context: nil, final_mode
# Use either transcript_ids or input_text as input into LeMUR.
# @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
# Use either transcript_ids or input_text as input into LeMUR.
# @param context [String, Hash{String => String}] Context to provide the model. This can be a string or a free-form JSON value.
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
# @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
# Defaults to "default".
# @param max_output_size [Integer] Max output size in tokens, up to 4000
Expand Down Expand Up @@ -291,7 +291,7 @@ def summary(transcript_ids: nil, input_text: nil, context: nil, final_model: nil
# Use either transcript_ids or input_text as input into LeMUR.
# @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
# Use either transcript_ids or input_text as input into LeMUR.
# @param context [String, Hash{String => String}] Context to provide the model. This can be a string or a free-form JSON value.
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
# @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
# Defaults to "default".
# @param max_output_size [Integer] Max output size in tokens, up to 4000
Expand Down Expand Up @@ -335,7 +335,7 @@ def question_answer(questions:, transcript_ids: nil, input_text: nil, context: n
# Use either transcript_ids or input_text as input into LeMUR.
# @param input_text [String] Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000.
# Use either transcript_ids or input_text as input into LeMUR.
# @param context [String, Hash{String => String}] Context to provide the model. This can be a string or a free-form JSON value.
# @param context [String, Hash{String => Object}] Context to provide the model. This can be a string or a free-form JSON value.
# @param final_model [Lemur::LemurModel] The model that is used for the final prompt after compression is performed.
# Defaults to "default".
# @param max_output_size [Integer] Max output size in tokens, up to 4000
Expand Down
87 changes: 87 additions & 0 deletions lib/assemblyai/realtime/types/receive_message.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
# frozen_string_literal: true

require "json"
require_relative "session_begins"
require_relative "partial_transcript"
require_relative "final_transcript"
require_relative "session_terminated"
require_relative "realtime_error"

module AssemblyAI
  class Realtime
    # Receive messages from the WebSocket
    class ReceiveMessage
      # Names of the union's member types, in precedence order: the first
      # member whose +validate_raw+ succeeds wins during deserialization.
      MEMBER_TYPE_NAMES = %w[
        SessionBegins
        PartialTranscript
        FinalTranscript
        SessionTerminated
        RealtimeError
      ].freeze

      # Deserialize a JSON object to an instance of ReceiveMessage
      #
      # Tries each union member in order; the first member that validates the
      # parsed payload deserializes it. If none match, the raw OpenStruct
      # produced by JSON.parse is returned unchanged.
      #
      # @param json_object [JSON]
      # @return [Realtime::ReceiveMessage]
      def self.from_json(json_object:)
        parsed = JSON.parse(json_object, object_class: OpenStruct)
        MEMBER_TYPE_NAMES.each do |name|
          # Constant lookup stays inside the rescued region so a missing or
          # non-matching member simply falls through to the next candidate.
          member = Realtime.const_get(name)
          member.validate_raw(obj: parsed)
          return member.from_json(json_object: json_object)
        rescue StandardError
          next
        end
        parsed
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the
      # given hash and check each fields type against the current object's
      # property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      # @raise [RuntimeError] when no union member accepts the value
      def self.validate_raw(obj:)
        MEMBER_TYPE_NAMES.each do |name|
          return Realtime.const_get(name).validate_raw(obj: obj)
        rescue StandardError
          next
        end
        raise("Passed value matched no type within the union, validation failed.")
      end
    end
  end
end
74 changes: 74 additions & 0 deletions lib/assemblyai/realtime/types/send_message.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
# frozen_string_literal: true

require "json"
require_relative "terminate_session"
require_relative "force_end_utterance"
require_relative "configure_end_utterance_silence_threshold"

module AssemblyAI
  class Realtime
    # Send messages to the WebSocket
    class SendMessage
      # Names of the union's member types, in precedence order: the first
      # member whose +validate_raw+ succeeds wins during deserialization.
      MEMBER_TYPE_NAMES = %w[
        TerminateSession
        ForceEndUtterance
        ConfigureEndUtteranceSilenceThreshold
      ].freeze

      # Deserialize a JSON object to an instance of SendMessage
      #
      # A payload that parses to a bare JSON string is passed through as-is;
      # otherwise each union member is tried in order and the first one that
      # validates deserializes the payload. If none match, the raw OpenStruct
      # produced by JSON.parse is returned unchanged.
      #
      # @param json_object [JSON]
      # @return [Realtime::SendMessage]
      def self.from_json(json_object:)
        parsed = JSON.parse(json_object, object_class: OpenStruct)
        return json_object if parsed.is_a?(String)

        MEMBER_TYPE_NAMES.each do |name|
          # Constant lookup stays inside the rescued region so a missing or
          # non-matching member simply falls through to the next candidate.
          member = Realtime.const_get(name)
          member.validate_raw(obj: parsed)
          return member.from_json(json_object: json_object)
        rescue StandardError
          next
        end
        parsed
      end

      # Leveraged for Union-type generation, validate_raw attempts to parse the
      # given hash and check each fields type against the current object's
      # property definitions.
      #
      # @param obj [Object]
      # @return [Void]
      # @raise [RuntimeError] when no union member accepts the value
      def self.validate_raw(obj:)
        # Plain strings are always a valid SendMessage payload.
        return true if obj.is_a?(String)

        MEMBER_TYPE_NAMES.each do |name|
          return Realtime.const_get(name).validate_raw(obj: obj)
        rescue StandardError
          next
        end
        raise("Passed value matched no type within the union, validation failed.")
      end
    end
  end
end
13 changes: 8 additions & 5 deletions lib/assemblyai/transcripts/types/content_safety_labels_result.rb
Original file line number Diff line number Diff line change
Expand Up @@ -13,18 +13,18 @@ class ContentSafetyLabelsResult

# @param status [Transcripts::AudioIntelligenceModelStatus] The status of the Content Moderation model. Either success, or unavailable in the rare case that the model failed.
# @param results [Array<Transcripts::ContentSafetyLabelResult>]
# @param summary [Hash{String => String}] A summary of the Content Moderation confidence results for the entire audio file
# @param severity_score_summary [Hash{String => String}] A summary of the Content Moderation severity results for the entire audio file
# @param summary [Hash{String => Float}] A summary of the Content Moderation confidence results for the entire audio file
# @param severity_score_summary [Hash{String => Transcripts::SeverityScoreSummary}] A summary of the Content Moderation severity results for the entire audio file
# @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
# @return [Transcripts::ContentSafetyLabelsResult]
def initialize(status:, results:, summary:, severity_score_summary:, additional_properties: nil)
# @type [Transcripts::AudioIntelligenceModelStatus] The status of the Content Moderation model. Either success, or unavailable in the rare case that the model failed.
@status = status
# @type [Array<Transcripts::ContentSafetyLabelResult>]
@results = results
# @type [Hash{String => String}] A summary of the Content Moderation confidence results for the entire audio file
# @type [Hash{String => Float}] A summary of the Content Moderation confidence results for the entire audio file
@summary = summary
# @type [Hash{String => String}] A summary of the Content Moderation severity results for the entire audio file
# @type [Hash{String => Transcripts::SeverityScoreSummary}] A summary of the Content Moderation severity results for the entire audio file
@severity_score_summary = severity_score_summary
# @type [OpenStruct] Additional properties unmapped to the current class definition
@additional_properties = additional_properties
Expand All @@ -43,7 +43,10 @@ def self.from_json(json_object:)
Transcripts::ContentSafetyLabelResult.from_json(json_object: v)
end
summary = struct.summary
severity_score_summary = struct.severity_score_summary
severity_score_summary = parsed_json["severity_score_summary"]&.transform_values do |_k, v|
v = v.to_json
Transcripts::SeverityScoreSummary.from_json(json_object: v)
end
new(status: status, results: results, summary: summary, severity_score_summary: severity_score_summary,
additional_properties: struct)
end
Expand Down
1 change: 1 addition & 0 deletions lib/assemblyai/transcripts/types/speech_model.rb
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ class Transcripts
# The speech model to use for the transcription.
# Enum-style namespace: each constant holds the literal string value that is
# assigned to the `speech_model` option.
class SpeechModel
# NOTE(review): presumably AssemblyAI's lightweight "Nano" tier — confirm against API docs.
NANO = "nano"
# Added in this commit; presumably the Conformer-2 model tier — confirm against API docs.
CONFORMER2 = "conformer-2"
end
end
end
Original file line number Diff line number Diff line change
Expand Up @@ -13,15 +13,15 @@ class TopicDetectionModelResult

# @param status [Transcripts::AudioIntelligenceModelStatus] The status of the Topic Detection model. Either success, or unavailable in the rare case that the model failed.
# @param results [Array<Transcripts::TopicDetectionResult>] An array of results for the Topic Detection model
# @param summary [Hash{String => String}] The overall relevance of topic to the entire audio file
# @param summary [Hash{String => Float}] The overall relevance of topic to the entire audio file
# @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
# @return [Transcripts::TopicDetectionModelResult]
def initialize(status:, results:, summary:, additional_properties: nil)
# @type [Transcripts::AudioIntelligenceModelStatus] The status of the Topic Detection model. Either success, or unavailable in the rare case that the model failed.
@status = status
# @type [Array<Transcripts::TopicDetectionResult>] An array of results for the Topic Detection model
@results = results
# @type [Hash{String => String}] The overall relevance of topic to the entire audio file
# @type [Hash{String => Float}] The overall relevance of topic to the entire audio file
@summary = summary
# @type [OpenStruct] Additional properties unmapped to the current class definition
@additional_properties = additional_properties
Expand Down
8 changes: 4 additions & 4 deletions lib/requests.rb
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ def initialize(api_key:, environment: Environment::DEFAULT, max_retries: nil, ti
@base_url = environment
@headers = {
"X-Fern-Language": "Ruby",
"X-Fern-SDK-Name": "AssemblyAI",
"X-Fern-SDK-Version": "1.0.0-beta.3",
"X-Fern-SDK-Name": "assemblyai",
"X-Fern-SDK-Version": "1.0.0-beta.4",
"Authorization": api_key.to_s
}
@conn = Faraday.new(@base_url, headers: @headers) do |faraday|
Expand All @@ -45,8 +45,8 @@ def initialize(api_key:, environment: Environment::DEFAULT, max_retries: nil, ti
@base_url = environment
@headers = {
"X-Fern-Language": "Ruby",
"X-Fern-SDK-Name": "AssemblyAI",
"X-Fern-SDK-Version": "1.0.0-beta.3",
"X-Fern-SDK-Name": "assemblyai",
"X-Fern-SDK-Version": "1.0.0-beta.4",
"Authorization": api_key.to_s
}
@conn = Faraday.new(@base_url, headers: @headers) do |faraday|
Expand Down
15 changes: 9 additions & 6 deletions lib/types_export.rb
Original file line number Diff line number Diff line change
Expand Up @@ -46,23 +46,26 @@
require_relative "assemblyai/transcripts/types/transcript_list_item"
require_relative "assemblyai/transcripts/types/transcript_list"
require_relative "assemblyai/transcripts/types/audio_intelligence_model_status"
require_relative "assemblyai/realtime/types/realtime_temporary_token_response"
require_relative "assemblyai/realtime/types/realtime_base_message"
require_relative "assemblyai/realtime/types/realtime"
require_relative "assemblyai/realtime/types/session_begins"
require_relative "assemblyai/realtime/types/partial_transcript"
require_relative "assemblyai/realtime/types/final_transcript"
require_relative "assemblyai/realtime/types/session_terminated"
require_relative "assemblyai/realtime/types/realtime_error"
require_relative "assemblyai/realtime/types/receive_message"
require_relative "assemblyai/realtime/types/audio_data"
require_relative "assemblyai/realtime/types/terminate_session"
require_relative "assemblyai/realtime/types/force_end_utterance"
require_relative "assemblyai/realtime/types/configure_end_utterance_silence_threshold"
require_relative "assemblyai/realtime/types/send_message"
require_relative "assemblyai/realtime/types/realtime_temporary_token_response"
require_relative "assemblyai/realtime/types/realtime_base_message"
require_relative "assemblyai/realtime/types/realtime_message"
require_relative "assemblyai/realtime/types/message_type"
require_relative "assemblyai/realtime/types/realtime_transcript_type"
require_relative "assemblyai/realtime/types/realtime_transcript"
require_relative "assemblyai/realtime/types/realtime_base_transcript"
require_relative "assemblyai/realtime/types/word"
require_relative "assemblyai/realtime/types/audio_data"
require_relative "assemblyai/realtime/types/force_end_utterance"
require_relative "assemblyai/realtime/types/configure_end_utterance_silence_threshold"
require_relative "assemblyai/realtime/types/terminate_session"
require_relative "assemblyai/realtime/types/audio_encoding"
require_relative "assemblyai/lemur/types/purge_lemur_request_data_response"
require_relative "assemblyai/lemur/types/lemur_base_response"
Expand Down

0 comments on commit 1886494

Please sign in to comment.