diff --git a/OpenAI.ChatGpt.AspNetCore/ChatGPTFactory.cs b/OpenAI.ChatGpt.AspNetCore/ChatGPTFactory.cs
index 268e24d..6153511 100644
--- a/OpenAI.ChatGpt.AspNetCore/ChatGPTFactory.cs
+++ b/OpenAI.ChatGpt.AspNetCore/ChatGPTFactory.cs
@@ -127,9 +127,9 @@ public async Task<ChatGPT> Create(
 
     public void Dispose()
     {
-        if (!_isHttpClientInjected)
+        if (!_isHttpClientInjected && _client is IDisposable disposableClient)
         {
-            _client.Dispose();
+            disposableClient.Dispose();
         }
     }
 }
\ No newline at end of file
diff --git a/OpenAI.ChatGpt.Modules.Translator/ChatGPTTranslatorService.cs b/OpenAI.ChatGpt.Modules.Translator/ChatGPTTranslatorService.cs
index 12010f4..7e8807e 100644
--- a/OpenAI.ChatGpt.Modules.Translator/ChatGPTTranslatorService.cs
+++ b/OpenAI.ChatGpt.Modules.Translator/ChatGPTTranslatorService.cs
@@ -25,7 +25,7 @@ public ChatGPTTranslatorService(
     public ChatGPTTranslatorService(
         string apiKey,
-        string? host,
+        string? host = null,
         string? defaultSourceLanguage = null,
         string? defaultTargetLanguage = null,
         string? extraPrompt = null)
@@ -39,9 +39,9 @@ public ChatGPTTranslatorService(
     public void Dispose()
     {
-        if (!_isHttpClientInjected)
+        if (!_isHttpClientInjected && _client is IDisposable disposableClient)
         {
-            _client.Dispose();
+            disposableClient.Dispose();
         }
     }
 
@@ -55,6 +55,14 @@ public async Task<string> Translate(
         if (text == null) throw new ArgumentNullException(nameof(text));
         var sourceLanguageOrDefault = sourceLanguage ?? _defaultSourceLanguage;
         var targetLanguageOrDefault = targetLanguage ?? _defaultTargetLanguage;
+        if (sourceLanguageOrDefault is null)
+        {
+            throw new ArgumentNullException(nameof(sourceLanguage), "Source language is not specified");
+        }
+        if (targetLanguageOrDefault is null)
+        {
+            throw new ArgumentNullException(nameof(targetLanguage), "Target language is not specified");
+        }
         var prompt = GetPrompt(sourceLanguageOrDefault, targetLanguageOrDefault);
         var response = await _client.GetChatCompletions(
             Dialog.StartAsSystem(prompt).ThenUser(text),
diff --git a/OpenAI.ChatGpt/ChatGPT.cs b/OpenAI.ChatGpt/ChatGPT.cs
index 202840a..bf899d7 100644
--- a/OpenAI.ChatGpt/ChatGPT.cs
+++ b/OpenAI.ChatGpt/ChatGPT.cs
@@ -92,9 +92,9 @@ public void Dispose()
     {
         Stop();
         _currentChat?.Dispose();
-        if (!_isClientInjected)
+        if (!_isClientInjected && _client is IDisposable disposableClient)
         {
-            _client.Dispose();
+            disposableClient.Dispose();
         }
     }
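
The three Dispose() changes above share one rationale: IOpenAiClient stops extending IDisposable (see the IOpenAiClient.cs diff below), so an owner can no longer assume its client is disposable. A minimal sketch of the resulting behavior, assuming a placeholder "sk-..." key:

    // Injected client: the service must not dispose what it does not own.
    IOpenAiClient shared = new OpenAiClient("sk-...");
    var withInjected = new ChatGPTTranslatorService(shared);
    withInjected.Dispose(); // `shared` stays alive (_isHttpClientInjected is true)

    // Owned client: created internally from the API key. The new `is IDisposable`
    // pattern disposes it while tolerating non-disposable implementations
    // such as mocks or decorators.
    var owned = new ChatGPTTranslatorService("sk-...", defaultSourceLanguage: "en", defaultTargetLanguage: "ru");
    owned.Dispose(); // the inner client is disposed here
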
diff --git a/OpenAI.ChatGpt/ChatService.cs b/OpenAI.ChatGpt/ChatService.cs
index 5435b9b..1fda380 100644
--- a/OpenAI.ChatGpt/ChatService.cs
+++ b/OpenAI.ChatGpt/ChatService.cs
@@ -93,13 +93,16 @@ private async Task<string> GetNextMessageResponse(
         cancellationToken = _cts.Token;
 
         var history = await LoadHistory(cancellationToken);
-        var messages = history.Append(message);
+        var messages = history.Append(message).ToArray();
 
         IsWriting = true;
         try
         {
+            var (model, maxTokens) = FindOptimalModelAndMaxToken(messages);
             var response = await _client.GetChatCompletionsRaw(
                 messages,
+                maxTokens: maxTokens,
+                model: model,
                 user: Topic.Config.PassUserIdToOpenAiRequests is true ? UserId : null,
                 requestModifier: Topic.Config.ModifyRequest,
                 cancellationToken: cancellationToken
@@ -117,7 +120,13 @@ await _chatHistoryStorage.SaveMessages(
             IsWriting = false;
         }
     }
-    
+
+    private (string model, int maxTokens) FindOptimalModelAndMaxToken(ChatCompletionMessage[] messages)
+    {
+        return ChatCompletionMessage.FindOptimalModelAndMaxToken(
+            messages, Topic.Config.Model, Topic.Config.MaxTokens);
+    }
+
     public IAsyncEnumerable<string> StreamNextMessageResponse(
         string message,
         bool throwOnCancellation = true,
@@ -143,11 +152,14 @@ private async IAsyncEnumerable<string> StreamNextMessageResponse(
         cancellationToken = _cts.Token;
 
         var history = await LoadHistory(cancellationToken);
-        var messages = history.Append(message);
+        var messages = history.Append(message).ToArray();
 
         var sb = new StringBuilder();
         IsWriting = true;
+        var (model, maxTokens) = FindOptimalModelAndMaxToken(messages);
         var stream = _client.StreamChatCompletions(
             messages,
+            maxTokens: maxTokens,
+            model: model,
             user: Topic.Config.PassUserIdToOpenAiRequests is true ? UserId : null,
             requestModifier: Topic.Config.ModifyRequest,
             cancellationToken: cancellationToken
diff --git a/OpenAI.ChatGpt/IOpenAiClient.cs b/OpenAI.ChatGpt/IOpenAiClient.cs
index 36b0403..7049e27 100644
--- a/OpenAI.ChatGpt/IOpenAiClient.cs
+++ b/OpenAI.ChatGpt/IOpenAiClient.cs
@@ -1,10 +1,9 @@
-using System.Runtime.CompilerServices;
-using OpenAI.ChatGpt.Models.ChatCompletion;
+using OpenAI.ChatGpt.Models.ChatCompletion;
 using OpenAI.ChatGpt.Models.ChatCompletion.Messaging;
 
 namespace OpenAI.ChatGpt;
 
-public interface IOpenAiClient : IDisposable
+public interface IOpenAiClient
 {
     Task<string> GetChatCompletions(
         UserOrSystemMessage dialog,
@@ -13,6 +12,7 @@ Task<string> GetChatCompletions(
         float temperature = ChatCompletionTemperatures.Default,
         string? user = null,
         Action<ChatCompletionRequest>? requestModifier = null,
+        Action<ChatCompletionResponse>? rawResponseGetter = null,
         CancellationToken cancellationToken = default);
 
     Task<string> GetChatCompletions(
@@ -22,6 +22,7 @@ Task<string> GetChatCompletions(
         float temperature = ChatCompletionTemperatures.Default,
         string? user = null,
         Action<ChatCompletionRequest>? requestModifier = null,
+        Action<ChatCompletionResponse>? rawResponseGetter = null,
         CancellationToken cancellationToken = default);
 
     Task<ChatCompletionResponse> GetChatCompletionsRaw(
@@ -81,8 +82,7 @@ IAsyncEnumerable<string> StreamChatCompletions(
         CancellationToken cancellationToken = default);
 
     IAsyncEnumerable<string> StreamChatCompletions(
-        ChatCompletionRequest request,
-        [EnumeratorCancellation] CancellationToken cancellationToken = default);
+        ChatCompletionRequest request, CancellationToken cancellationToken = default);
 
     IAsyncEnumerable<ChatCompletionResponse> StreamChatCompletionsRaw(
         ChatCompletionRequest request,
         CancellationToken cancellationToken = default);
diff --git a/OpenAI.ChatGpt/Models/ChatCompletion/ChatCompletionModels.cs b/OpenAI.ChatGpt/Models/ChatCompletion/ChatCompletionModels.cs
index be486bc..0cdb874 100644
--- a/OpenAI.ChatGpt/Models/ChatCompletion/ChatCompletionModels.cs
+++ b/OpenAI.ChatGpt/Models/ChatCompletion/ChatCompletionModels.cs
@@ -199,7 +199,7 @@ public static void EnsureMaxTokensIsSupported(string model, int maxTokens)
     {
         throw new ArgumentOutOfRangeException(
             nameof(maxTokens),
-            $"Max tokens must be less than or equal to {limit} for model {model}"
+            $"Max tokens must be less than or equal to {limit} for model {model} but was {maxTokens}"
         );
     }
 }
@@ -210,7 +210,7 @@ public static void EnsureMaxTokensIsSupportedByAnyModel(int maxTokens)
     if (maxTokens > limit)
     {
         throw new ArgumentOutOfRangeException(
-            nameof(maxTokens), $"Max tokens must be less than or equal to {limit}");
+            nameof(maxTokens), $"Max tokens must be less than or equal to {limit} but was {maxTokens}");
     }
 }
\ No newline at end of file
diff --git a/OpenAI.ChatGpt/Models/ChatCompletion/Messaging/ChatCompletionMessage.cs b/OpenAI.ChatGpt/Models/ChatCompletion/Messaging/ChatCompletionMessage.cs
index 1d8d0a5..e81e202 100644
--- a/OpenAI.ChatGpt/Models/ChatCompletion/Messaging/ChatCompletionMessage.cs
+++ b/OpenAI.ChatGpt/Models/ChatCompletion/Messaging/ChatCompletionMessage.cs
@@ -95,4 +95,34 @@ public override string ToString()
         ? $"{Role}: {Content}"
         : string.Join(Environment.NewLine, _messages.Select(m => $"{m.Role}: {m.Content}"));
 }
+
+    public static (string model, int maxTokens) FindOptimalModelAndMaxToken(
+        IEnumerable<ChatCompletionMessage> messages,
+        string? model,
+        int? maxTokens,
+        string smallModel = ChatCompletionModels.Default,
+        string bigModel = ChatCompletionModels.Gpt3_5_Turbo_16k,
+        bool useMaxPossibleTokens = true)
+    {
+        var tokenCount = CalculateApproxTotalTokenCount(messages);
+        switch (model, maxTokens)
+        {
+            case (null, null):
+            {
+                model = tokenCount > 6000 ? bigModel : smallModel;
+                maxTokens = GetMaxPossibleTokens(model);
+                break;
+            }
+            case (null, _):
+                model = smallModel;
+                break;
+            case (_, null):
+                maxTokens = useMaxPossibleTokens ? GetMaxPossibleTokens(model) : ChatCompletionRequest.MaxTokensDefault;
+                break;
+        }
+
+        return (model, maxTokens.Value);
+
+        int GetMaxPossibleTokens(string s) => ChatCompletionModels.GetMaxTokensLimitForModel(s) - tokenCount - 500;
+    }
 }
\ No newline at end of file
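
A short usage sketch of the new helper (token counts are approximate, produced by CalculateApproxTotalTokenCount; the 500-token subtraction is the headroom reserved by GetMaxPossibleTokens above):

    var dialog = Dialog.StartAsSystem("You are a helpful assistant.").ThenUser("Hi!");

    // Nothing configured: prompts over ~6000 tokens are promoted to the 16k model,
    // smaller ones stay on the default model; maxTokens becomes
    // GetMaxTokensLimitForModel(model) - promptTokens - 500.
    var (model, maxTokens) = ChatCompletionMessage.FindOptimalModelAndMaxToken(
        dialog.GetMessages(), model: null, maxTokens: null);

    // Model pinned, budget left open: the remaining budget is computed for that model.
    var (_, budget) = ChatCompletionMessage.FindOptimalModelAndMaxToken(
        dialog.GetMessages(), model: ChatCompletionModels.Gpt3_5_Turbo_16k, maxTokens: null);

This is what ChatService.FindOptimalModelAndMaxToken above feeds with Topic.Config.Model and Topic.Config.MaxTokens, so explicitly configured topics keep their settings and only unset values are inferred.
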
diff --git a/OpenAI.ChatGpt/OpenAIClient.cs b/OpenAI.ChatGpt/OpenAiClient.cs
similarity index 98%
rename from OpenAI.ChatGpt/OpenAIClient.cs
rename to OpenAI.ChatGpt/OpenAiClient.cs
index bef7f93..0c5ab57 100644
--- a/OpenAI.ChatGpt/OpenAIClient.cs
+++ b/OpenAI.ChatGpt/OpenAiClient.cs
@@ -12,7 +12,7 @@ namespace OpenAI.ChatGpt;
 
 /// <summary> Thread-safe OpenAI client. </summary>
 [Fody.ConfigureAwait(false)]
-public class OpenAiClient : IDisposable, IOpenAiClient
+public class OpenAiClient : IOpenAiClient, IDisposable
 {
     private const string DefaultHost = "https://api.openai.com/v1/";
     private const string ImagesEndpoint = "images/generations";
@@ -122,6 +122,7 @@ public async Task<string> GetChatCompletions(
         float temperature = ChatCompletionTemperatures.Default,
         string? user = null,
         Action<ChatCompletionRequest>? requestModifier = null,
+        Action<ChatCompletionResponse>? rawResponseGetter = null,
         CancellationToken cancellationToken = default)
     {
         if (dialog == null) throw new ArgumentNullException(nameof(dialog));
@@ -135,6 +136,7 @@ public async Task<string> GetChatCompletions(
             requestModifier
         );
         var response = await GetChatCompletionsRaw(request, cancellationToken);
+        rawResponseGetter?.Invoke(response);
         return response.Choices[0].Message!.Content;
     }
 
@@ -145,6 +147,7 @@ public async Task<string> GetChatCompletions(
         float temperature = ChatCompletionTemperatures.Default,
         string? user = null,
         Action<ChatCompletionRequest>? requestModifier = null,
+        Action<ChatCompletionResponse>? rawResponseGetter = null,
         CancellationToken cancellationToken = default)
     {
         if (messages == null) throw new ArgumentNullException(nameof(messages));
@@ -158,6 +161,7 @@ public async Task<string> GetChatCompletions(
             requestModifier
         );
         var response = await GetChatCompletionsRaw(request, cancellationToken);
+        rawResponseGetter?.Invoke(response);
         return response.GetMessageContent();
     }
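
The new rawResponseGetter callback exposes the raw ChatCompletionResponse without changing the string-returning signatures. A sketch, assuming an existing IOpenAiClient named client:

    ChatCompletionResponse? raw = null;
    var answer = await client.GetChatCompletions(
        Dialog.StartAsSystem("You are a laconic assistant.").ThenUser("Say hi."),
        maxTokens: 32,
        rawResponseGetter: r => raw = r);
    Console.WriteLine($"{answer} ({raw!.Usage.TotalTokens} tokens)");

This is the hook the TokensCounterDecorator below builds on.
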
diff --git a/OpenAI.ChatGpt/OpenAiClientExtensions.GetAsObjectAsync.cs b/OpenAI.ChatGpt/OpenAiClientExtensions.GetAsObjectAsync.cs
new file mode 100644
index 0000000..05cf488
--- /dev/null
+++ b/OpenAI.ChatGpt/OpenAiClientExtensions.GetAsObjectAsync.cs
@@ -0,0 +1,198 @@
+using System.Text.Json;
+using System.Text.Json.Serialization;
+using OpenAI.ChatGpt.Models.ChatCompletion;
+using OpenAI.ChatGpt.Models.ChatCompletion.Messaging;
+
+namespace OpenAI.ChatGpt;
+
+public static class OpenAiClientExtensions
+{
+    /// <summary>
+    /// Asynchronously sends a chat completion request to the OpenAI API and deserializes the response to a specific object type.
+    /// </summary>
+    /// <typeparam name="TObject">The type of object to deserialize the response to. The type must have a parameterless constructor.</typeparam>
+    /// <param name="client">The OpenAI client.</param>
+    /// <param name="dialog">The chat dialog, including a user message and any system messages that set the behavior of the assistant.</param>
+    /// <param name="maxTokens">Optional. The maximum number of tokens in the response. Defaults to the limit of the model, minus the number of input tokens, minus 500.</param>
+    /// <param name="model">Optional. The name of the model to use. Defaults to "gpt-3.5-turbo" unless the message input is longer than 6000 tokens, in which case it defaults to "gpt-3.5-turbo-16k".</param>
+    /// <param name="temperature">Controls the randomness of the assistant's output. Ranges from 0.0 to 1.0, where 0.0 is deterministic and 1.0 is highly random. Default value is the default for the OpenAI API.</param>
+    /// <param name="user">Optional. The user who is having the conversation. If not specified, defaults to "system".</param>
+    /// <param name="requestModifier">Optional. A function that can modify the chat completion request before it is sent to the API.</param>
+    /// <param name="rawResponseGetter">Optional. A function that can access the raw API response.</param>
+    /// <param name="jsonSerializerOptions">Optional. Custom JSON serializer options for the response format. If not specified, default options are used.</param>
+    /// <param name="jsonDeserializerOptions">Optional. Custom JSON deserializer options for the deserialization. If not specified, default options with case insensitive property names are used.</param>
+    /// <param name="cancellationToken">Optional. A cancellation token that can be used to cancel the operation.</param>
+    /// <returns>
+    /// A task that represents the asynchronous operation. The task result contains the deserialized object from the API response.
+    /// </returns>
+    /// <exception cref="ArgumentNullException">Thrown when the <paramref name="client"/> or <paramref name="dialog"/> is null.</exception>
+    /// <exception cref="InvalidOperationException">Thrown when the response from the API cannot be deserialized to the specified object type.</exception>
+    /// <remarks>
+    /// The method modifies the content of the message in the dialog to include a request for a JSON-formatted response.
+    /// The original message content is restored after the API call.
+    /// </remarks>
+    public static Task<TObject> GetStructuredResponse<TObject>(
+        this IOpenAiClient client,
+        UserOrSystemMessage dialog,
+        int? maxTokens = null,
+        string? model = null,
+        float temperature = ChatCompletionTemperatures.Default,
+        string? user = null,
+        Action<ChatCompletionRequest>? requestModifier = null,
+        Action<ChatCompletionResponse>? rawResponseGetter = null,
+        JsonSerializerOptions? jsonSerializerOptions = null,
+        JsonSerializerOptions? jsonDeserializerOptions = null,
+        CancellationToken cancellationToken = default) where TObject : new()
+    {
+        ArgumentNullException.ThrowIfNull(client);
+        ArgumentNullException.ThrowIfNull(dialog);
+        var responseFormat = CreateResponseFormatJson(new TObject(), jsonSerializerOptions);
+
+        return client.GetStructuredResponse<TObject>(
+            dialog: dialog,
+            responseFormat: responseFormat,
+            maxTokens: maxTokens,
+            model: model,
+            temperature: temperature,
+            user: user,
+            requestModifier: requestModifier,
+            rawResponseGetter: rawResponseGetter,
+            jsonDeserializerOptions: jsonDeserializerOptions,
+            cancellationToken: cancellationToken);
+    }
+
+    /// <summary>
+    /// Asynchronously gets a response from the OpenAI API, and attempts to deserialize it into an instance of the specified type.
+    /// </summary>
+    /// <typeparam name="TObject">The type into which to deserialize the response.</typeparam>
+    /// <param name="client">The OpenAI client.</param>
+    /// <param name="dialog">The dialog to send to the OpenAI API.</param>
+    /// <param name="responseExample">Is used to infer the expected structure of the response if no response format is explicitly specified.</param>
+    /// <param name="maxTokens">(Optional) The maximum number of tokens for the model to generate. If null, the default is calculated.</param>
+    /// <param name="model">(Optional) The model to use. If null, the default model is used.</param>
+    /// <param name="temperature">(Optional) Controls randomness in the AI's output. Default is defined by ChatCompletionTemperatures.Default.</param>
+    /// <param name="user">(Optional) User identifier. If null, the default user is used.</param>
+    /// <param name="requestModifier">(Optional) Delegate for modifying the request.</param>
+    /// <param name="rawResponseGetter">(Optional) Delegate for processing the raw response.</param>
+    /// <param name="jsonSerializerOptions">(Optional) Options for the JSON serializer. If null, the default options are used.</param>
+    /// <param name="jsonDeserializerOptions">(Optional) Options for the JSON deserializer. If null, case-insensitive property name matching is used.</param>
+    /// <param name="cancellationToken">(Optional) A token that can be used to cancel the operation.</param>
+    /// <returns>The task object representing the asynchronous operation, containing the deserialized response,
+    /// or the default response if deserialization fails.</returns>
+    /// <exception cref="ArgumentNullException">Thrown if <paramref name="client"/> or <paramref name="dialog"/> or <paramref name="responseExample"/> is null.</exception>
+    /// <remarks>
+    /// This method modifies the content of the dialog to include a message instructing the AI to respond in a certain format.
+    /// After the call to the API, the original content of the dialog is restored.
+    /// </remarks>
+    public static Task<TObject> GetStructuredResponse<TObject>(
+        this IOpenAiClient client,
+        UserOrSystemMessage dialog,
+        TObject responseExample,
+        int? maxTokens = null,
+        string? model = null,
+        float temperature = ChatCompletionTemperatures.Default,
+        string? user = null,
+        Action<ChatCompletionRequest>? requestModifier = null,
+        Action<ChatCompletionResponse>? rawResponseGetter = null,
+        JsonSerializerOptions? jsonSerializerOptions = null,
+        JsonSerializerOptions? jsonDeserializerOptions = null,
+        CancellationToken cancellationToken = default)
+    {
+        ArgumentNullException.ThrowIfNull(client);
+        ArgumentNullException.ThrowIfNull(dialog);
+        ArgumentNullException.ThrowIfNull(responseExample);
+
+        var responseFormat = CreateResponseFormatJson(responseExample, jsonSerializerOptions);
+        return client.GetStructuredResponse<TObject>(
+            dialog: dialog,
+            responseFormat: responseFormat,
+            maxTokens: maxTokens,
+            model: model,
+            temperature: temperature,
+            user: user,
+            requestModifier: requestModifier,
+            rawResponseGetter: rawResponseGetter,
+            jsonDeserializerOptions: jsonDeserializerOptions,
+            cancellationToken: cancellationToken);
+    }
+
+    internal static async Task<TObject> GetStructuredResponse<TObject>(
+        this IOpenAiClient client,
+        UserOrSystemMessage dialog,
+        string responseFormat,
+        int? maxTokens = null,
+        string? model = null,
+        float temperature = ChatCompletionTemperatures.Default,
+        string? user = null,
+        Action<ChatCompletionRequest>? requestModifier = null,
+        Action<ChatCompletionResponse>? rawResponseGetter = null,
+        JsonSerializerOptions? jsonDeserializerOptions = null,
+        CancellationToken cancellationToken = default)
+    {
+        ArgumentNullException.ThrowIfNull(client);
+        ArgumentNullException.ThrowIfNull(dialog);
+
+        var editMsg = dialog.GetMessages().FirstOrDefault(it => it is SystemMessage)
+                      ?? dialog.GetMessages().First();
+        var originalContent = editMsg.Content;
+        try
+        {
+            editMsg.Content += GetAdditionalJsonResponsePrompt(responseFormat);
+
+            (model, maxTokens) = ChatCompletionMessage.FindOptimalModelAndMaxToken(dialog.GetMessages(), model, maxTokens);
+
+            var response = await client.GetChatCompletions(
+                dialog,
+                maxTokens.Value,
+                model,
+                temperature,
+                user,
+                requestModifier,
+                rawResponseGetter,
+                cancellationToken);
+
+            jsonDeserializerOptions ??= new JsonSerializerOptions { PropertyNameCaseInsensitive = true };
+            var deserialized = JsonSerializer.Deserialize<TObject>(response, jsonDeserializerOptions);
+            if (deserialized is null)
+            {
+                throw new InvalidOperationException(
+                    $"Failed to deserialize response to {typeof(TObject)}. Response: {response}");
+            }
+
+            return deserialized;
+        }
+        finally
+        {
+            editMsg.Content = originalContent;
+        }
+    }
+
+    private static string GetAdditionalJsonResponsePrompt(string responseFormat)
+    {
+        return $"\n\nWrite your response in JSON format, which structure is enclosed within double backticks ``{responseFormat}``";
+    }
+
+    internal static string CreateResponseFormatJson<TObject>(
+        TObject objectToDeserialize,
+        JsonSerializerOptions? jsonSerializerOptions)
+    {
+        ArgumentNullException.ThrowIfNull(objectToDeserialize);
+        if (jsonSerializerOptions is null)
+        {
+            jsonSerializerOptions = new JsonSerializerOptions()
+            {
+                WriteIndented = false,
+                DefaultIgnoreCondition = JsonIgnoreCondition.Never
+            };
+        }
+        else
+        {
+            jsonSerializerOptions = new JsonSerializerOptions(jsonSerializerOptions)
+            {
+                WriteIndented = false,
+                DefaultIgnoreCondition = JsonIgnoreCondition.Never
+            };
+        }
+        return JsonSerializer.Serialize(objectToDeserialize, jsonSerializerOptions);
+    }
+}
\ No newline at end of file
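
A usage sketch of the extension, mirroring the integration tests further down (client is any IOpenAiClient; UserInfo is a caller-defined DTO):

    private class UserInfo
    {
        public string Name { get; init; } = "";
        public int Age { get; init; }
    }

    // The system message is temporarily suffixed with a JSON response format
    // serialized from new UserInfo(); the reply is deserialized case-insensitively,
    // and the original prompt text is restored in the finally block.
    var dialog = Dialog.StartAsSystem("What did user input?")
        .ThenUser("My name is John, my age is 30");
    UserInfo info = await client.GetStructuredResponse<UserInfo>(dialog);
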
diff --git a/OpenAI.ChatGpt/TokensCounterDecorator.cs b/OpenAI.ChatGpt/TokensCounterDecorator.cs
new file mode 100644
index 0000000..2b218fd
--- /dev/null
+++ b/OpenAI.ChatGpt/TokensCounterDecorator.cs
@@ -0,0 +1,107 @@
+using OpenAI.ChatGpt.Models.ChatCompletion;
+using OpenAI.ChatGpt.Models.ChatCompletion.Messaging;
+
+namespace OpenAI.ChatGpt;
+
+public class TokensCounterDecorator : IOpenAiClient, IDisposable
+{
+    private readonly IOpenAiClient _decorated;
+
+    public long TotalTokensCount { get; private set; }
+
+    public TokensCounterDecorator(IOpenAiClient decorated)
+    {
+        _decorated = decorated ?? throw new ArgumentNullException(nameof(decorated));
+    }
+
+    public async Task<string> GetChatCompletions(
+        UserOrSystemMessage dialog,
+        int maxTokens = ChatCompletionRequest.MaxTokensDefault,
+        string model = ChatCompletionModels.Default,
+        float temperature = ChatCompletionTemperatures.Default,
+        string? user = null,
+        Action<ChatCompletionRequest>? requestModifier = null,
+        Action<ChatCompletionResponse>? rawResponseGetter = null,
+        CancellationToken cancellationToken = default)
+    {
+        return await _decorated.GetChatCompletions(dialog, maxTokens, model, temperature, user,
+            requestModifier, WrapRawResponseGetter(rawResponseGetter), cancellationToken);
+    }
+
+    public async Task<string> GetChatCompletions(
+        IEnumerable<ChatCompletionMessage> messages,
+        int maxTokens = ChatCompletionRequest.MaxTokensDefault,
+        string model = ChatCompletionModels.Default,
+        float temperature = ChatCompletionTemperatures.Default,
+        string? user = null,
+        Action<ChatCompletionRequest>? requestModifier = null,
+        Action<ChatCompletionResponse>? rawResponseGetter = null,
+        CancellationToken cancellationToken = default)
+    {
+        return await _decorated.GetChatCompletions(messages, maxTokens, model, temperature, user,
+            requestModifier, WrapRawResponseGetter(rawResponseGetter), cancellationToken);
+    }
+
+    private Action<ChatCompletionResponse> WrapRawResponseGetter(
+        Action<ChatCompletionResponse>? rawResponseGetter)
+    {
+        return response =>
+        {
+            TotalTokensCount += response.Usage.TotalTokens;
+            rawResponseGetter?.Invoke(response);
+        };
+    }
+
+    public void Dispose()
+    {
+        if (_decorated is IDisposable disposable)
+        {
+            disposable.Dispose();
+        }
+    }
+
+    public async Task<ChatCompletionResponse> GetChatCompletionsRaw(
+        IEnumerable<ChatCompletionMessage> messages, int maxTokens = 64,
+        string model = "gpt-3.5-turbo",
+        float temperature = 0.5f, string? user = null,
+        Action<ChatCompletionRequest>? requestModifier = null,
+        CancellationToken cancellationToken = default)
+    {
+        return await _decorated.GetChatCompletionsRaw(messages, maxTokens, model, temperature, user,
+            requestModifier, cancellationToken);
+    }
+
+    public IAsyncEnumerable<string> StreamChatCompletions(
+        IEnumerable<ChatCompletionMessage> messages, int maxTokens = 64,
+        string model = "gpt-3.5-turbo", float temperature = 0.5f, string? user = null,
+        Action<ChatCompletionRequest>? requestModifier = null,
+        CancellationToken cancellationToken = default)
+    {
+        return _decorated.StreamChatCompletions(messages, maxTokens, model, temperature, user,
+            requestModifier, cancellationToken);
+    }
+
+    public IAsyncEnumerable<string> StreamChatCompletions(UserOrSystemMessage messages,
+        int maxTokens = 64,
+        string model = "gpt-3.5-turbo", float temperature = 0.5f, string? user = null,
+        Action<ChatCompletionRequest>? requestModifier = null,
+        CancellationToken cancellationToken = new CancellationToken())
+    {
+        return _decorated.StreamChatCompletions(messages, maxTokens, model, temperature, user,
+            requestModifier, cancellationToken);
+    }
+
+    public IAsyncEnumerable<string> StreamChatCompletions(ChatCompletionRequest request,
+        CancellationToken cancellationToken = new CancellationToken())
+    {
+        return _decorated.StreamChatCompletions(request, cancellationToken);
+    }
+
+    public IAsyncEnumerable<ChatCompletionResponse> StreamChatCompletionsRaw(
+        ChatCompletionRequest request,
+        CancellationToken cancellationToken = new CancellationToken())
+    {
+        return _decorated.StreamChatCompletionsRaw(request, cancellationToken);
+    }
+}
\ No newline at end of file
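
A sketch of the decorator in use. Only the two non-streaming GetChatCompletions overloads wrap the callback, so GetChatCompletionsRaw and the streaming methods pass through without counting:

    var counting = new TokensCounterDecorator(new OpenAiClient("sk-...")); // placeholder key

    var first = await counting.GetChatCompletions(
        Dialog.StartAsSystem("Be brief.").ThenUser("One word for 'happy'?"), maxTokens: 16);
    var second = await counting.GetChatCompletions(
        Dialog.StartAsSystem("Be brief.").ThenUser("One word for 'sad'?"), maxTokens: 16);

    Console.WriteLine(counting.TotalTokensCount); // sum of Usage.TotalTokens across both calls
    counting.Dispose(); // disposes the wrapped OpenAiClient, which is IDisposable
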
diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/OpenAiClientTests/ChatCompletionsApiTests.cs b/tests/OpenAI.ChatGpt.IntegrationTests/OpenAiClientTests/ChatCompletionsApiTests.cs
index 27555f8..a00412e 100644
--- a/tests/OpenAI.ChatGpt.IntegrationTests/OpenAiClientTests/ChatCompletionsApiTests.cs
+++ b/tests/OpenAI.ChatGpt.IntegrationTests/OpenAiClientTests/ChatCompletionsApiTests.cs
@@ -1,6 +1,4 @@
-using OpenAI.Tests.Shared;
-
-namespace OpenAI.ChatGpt.IntegrationTests.OpenAiClientTests;
+namespace OpenAI.ChatGpt.IntegrationTests.OpenAiClientTests;
 
 [Collection("OpenAiTestCollection")] //to prevent parallel execution
 public class ChatCompletionsApiTests
diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/OpenAiClientTests/OpenAiClient_GetAsObjectTests.cs b/tests/OpenAI.ChatGpt.IntegrationTests/OpenAiClientTests/OpenAiClient_GetAsObjectTests.cs
new file mode 100644
index 0000000..67b64b1
--- /dev/null
+++ b/tests/OpenAI.ChatGpt.IntegrationTests/OpenAiClientTests/OpenAiClient_GetAsObjectTests.cs
@@ -0,0 +1,62 @@
+namespace OpenAI.ChatGpt.IntegrationTests.OpenAiClientTests;
+
+public class OpenAiClientGetAsObjectTests
+{
+    private readonly OpenAiClient _client;
+
+    public OpenAiClientGetAsObjectTests()
+    {
+        _client = new OpenAiClient(Helpers.GetOpenAiKey());
+    }
+
+    [Fact]
+    public async Task Get_simple_structured_response_from_ChatGPT()
+    {
+        var message =
+            Dialog.StartAsSystem("What did user input?")
+                .ThenUser("My name is John, my age is 30, my email is john@gmail.com");
+        var response = await _client.GetStructuredResponse<UserInfo>(message);
+        response.Should().NotBeNull();
+        response.Name.Should().Be("John");
+        response.Age.Should().Be(30);
+        response.Email.Should().Be("john@gmail.com");
+    }
+
+    [Fact]
+    public async Task Get_structured_response_with_array_from_ChatGPT()
+    {
+        var message =
+            Dialog.StartAsSystem("What did user input?")
+                .ThenUser("My name is John, my age is 30, my email is john@gmail.com. I want to buy 2 apples and 3 oranges.");
+        var response = await _client.GetStructuredResponse<Order>(message);
+        response.Should().NotBeNull();
+        response.UserInfo.Name.Should().Be("John");
+        response.UserInfo.Age.Should().Be(30);
+        response.UserInfo.Email.Should().Be("john@gmail.com");
+
+        response.Items.Should().HaveCount(2);
+        response.Items[0].Name.Should().Be("apple");
+        response.Items[0].Quantity.Should().Be(2);
+        response.Items[1].Name.Should().Be("orange");
+        response.Items[1].Quantity.Should().Be(3);
+    }
+
+    private class Order
+    {
+        public UserInfo UserInfo { get; set; } = new();
+        public List<Item> Items { get; set; } = new() { new Item() };
+
+        public class Item
+        {
+            public string Name { get; set; } = "";
+            public int Quantity { get; set; }
+        }
+    }
+
+    private class UserInfo
+    {
+        public string Name { get; init; }
+        public int Age { get; init; }
+        public string Email { get; init; }
+    }
+}
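
For the Order test above, the response format appended to the system prompt would serialize roughly as follows (default System.Text.Json naming with WriteIndented = false; Email has no initializer, so it renders as null):

    {"UserInfo":{"Name":"","Age":0,"Email":null},"Items":[{"Name":"","Quantity":0}]}
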
diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/Usings.cs b/tests/OpenAI.ChatGpt.IntegrationTests/Usings.cs
index af42377..19d0ec2 100644
--- a/tests/OpenAI.ChatGpt.IntegrationTests/Usings.cs
+++ b/tests/OpenAI.ChatGpt.IntegrationTests/Usings.cs
@@ -18,4 +18,5 @@
 global using OpenAI.ChatGpt.Interfaces;
 global using OpenAI.ChatGpt.Internal;
 global using OpenAI.ChatGpt.Models;
-global using OpenAI.ChatGpt.Exceptions;
\ No newline at end of file
+global using OpenAI.ChatGpt.Exceptions;
+global using OpenAI.Tests.Shared;
\ No newline at end of file
diff --git a/tests/OpenAI.ChatGpt.Modules.Translator.IntegrationTests/OpenAI.ChatGpt.Modules.Translator.IntegrationTests.csproj b/tests/OpenAI.ChatGpt.Modules.Translator.IntegrationTests/OpenAI.ChatGpt.Modules.Translator.IntegrationTests.csproj
index c30d465..ac50b7a 100644
--- a/tests/OpenAI.ChatGpt.Modules.Translator.IntegrationTests/OpenAI.ChatGpt.Modules.Translator.IntegrationTests.csproj
+++ b/tests/OpenAI.ChatGpt.Modules.Translator.IntegrationTests/OpenAI.ChatGpt.Modules.Translator.IntegrationTests.csproj
@@ -12,7 +12,6 @@
-      <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
diff --git a/tests/OpenAI.ChatGpt.Modules.Translator.UnitTests/ChatGptTranslatorServiceTests.cs b/tests/OpenAI.ChatGpt.Modules.Translator.UnitTests/ChatGptTranslatorServiceTests.cs
index c594ab4..6f36ddb 100644
--- a/tests/OpenAI.ChatGpt.Modules.Translator.UnitTests/ChatGptTranslatorServiceTests.cs
+++ b/tests/OpenAI.ChatGpt.Modules.Translator.UnitTests/ChatGptTranslatorServiceTests.cs
@@ -23,7 +23,7 @@ public void Initialization_with_null_api_key_should_throw_exception()
     public void Initialization_with_null_client_should_throw_exception()
     {
         // Arrange & Act
-        Action act = () => new ChatGPTTranslatorService(client: null);
+        Action act = () => new ChatGPTTranslatorService(client: null!);
 
         // Assert
         act.Should().Throw<ArgumentNullException>();
@@ -33,7 +33,7 @@ public void Initialization_with_null_client_should_throw_exception()
     public void Dispose_with_injected_client_should_not_dispose_client()
     {
         // Arrange
-        var clientMock = new Mock<OpenAiClient>();
+        var clientMock = new Mock<IOpenAiClient>();
         clientMock.Setup(client => client.Dispose()).Verifiable();
 
         var translatorService = new ChatGPTTranslatorService(clientMock.Object);
@@ -59,6 +59,7 @@ public async Task Translate_without_source_and_target_languages_uses_default_lan
                 It.IsAny<float>(),
                 It.IsAny<string>(),
                 It.IsAny<Action<ChatCompletionRequest>>(),
+                It.IsAny<Action<ChatCompletionResponse>>(),
                 It.IsAny<CancellationToken>()))
             .ReturnsAsync("Привет, мир!")
             .Verifiable();
@@ -80,7 +81,8 @@ public async Task Translate_without_source_and_target_languages_uses_default_lan
                 It.IsAny<string>(),
                 It.IsAny<float>(),
                 It.IsAny<string>(),
-                It.IsAny<Action<ChatCompletionRequest>>(),
+                It.IsAny<Action<ChatCompletionRequest>>(),
+                It.IsAny<Action<ChatCompletionResponse>>(),
                 It.IsAny<CancellationToken>()),
             Times.Once);
 
         translatedText.Should().Be("Привет, мир!");
diff --git a/tests/OpenAI.ChatGpt.UnitTests/OpenAI.ChatGpt.UnitTests.csproj b/tests/OpenAI.ChatGpt.UnitTests/OpenAI.ChatGpt.UnitTests.csproj
index d5163cf..c951225 100644
--- a/tests/OpenAI.ChatGpt.UnitTests/OpenAI.ChatGpt.UnitTests.csproj
+++ b/tests/OpenAI.ChatGpt.UnitTests/OpenAI.ChatGpt.UnitTests.csproj
@@ -10,6 +10,7 @@
+