feat: support codex
@@ -0,0 +1,29 @@
using Yi.Framework.AiHub.Domain.Shared.Dtos;
using Yi.Framework.AiHub.Domain.Shared.Dtos.OpenAi.Responses;

namespace Yi.Framework.AiHub.Domain.AiGateWay;

public interface IOpenAiResponseService
{
    /// <summary>
    /// Responses - streaming
    /// </summary>
    /// <param name="aiModelDescribe"></param>
    /// <param name="input"></param>
    /// <param name="cancellationToken"></param>
    /// <returns></returns>
    public IAsyncEnumerable<(string, dynamic?)> ResponsesStreamAsync(AiModelDescribe aiModelDescribe,
        OpenAiResponsesInput input,
        CancellationToken cancellationToken);

    /// <summary>
    /// Responses - non-streaming
    /// </summary>
    /// <param name="aiModelDescribe"></param>
    /// <param name="input"></param>
    /// <param name="cancellationToken"></param>
    /// <returns></returns>
    public Task<OpenAiResponsesOutput> ResponsesAsync(AiModelDescribe aiModelDescribe,
        OpenAiResponsesInput input,
        CancellationToken cancellationToken);
}
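For orientation, here is a minimal consumer sketch of this interface. It is hypothetical and not part of this commit; the service instance, AiModelDescribe and OpenAiResponsesInput are assumed to come from elsewhere (in the commit they come from keyed DI and the incoming request).

using System;
using System.Threading;
using System.Threading.Tasks;
using Yi.Framework.AiHub.Domain.AiGateWay;
using Yi.Framework.AiHub.Domain.Shared.Dtos;
using Yi.Framework.AiHub.Domain.Shared.Dtos.OpenAi.Responses;

public static class OpenAiResponseServiceConsumerSketch
{
    // Hypothetical caller, not part of this commit.
    public static async Task ConsumeAsync(
        IOpenAiResponseService responseService,
        AiModelDescribe aiModelDescribe,
        OpenAiResponsesInput input,
        CancellationToken cancellationToken)
    {
        // Streaming: each item is a (SSE event type, deserialized payload) tuple.
        await foreach (var (eventType, payload) in
                       responseService.ResponsesStreamAsync(aiModelDescribe, input, cancellationToken))
        {
            Console.WriteLine($"{eventType}: {payload}");
        }

        // Non-streaming: a single aggregated OpenAiResponsesOutput.
        var output = await responseService.ResponsesAsync(aiModelDescribe, input, cancellationToken);
        Console.WriteLine(output);
    }
}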
@@ -26,19 +26,7 @@ public class CustomOpenAIAnthropicChatCompletionsService(
    {
        // Convert request format: Claude -> OpenAI
        var openAIRequest = AnthropicToOpenAi.ConvertAnthropicToOpenAi(request);

        if (openAIRequest.Model.StartsWith("gpt-5"))
        {
            openAIRequest.MaxCompletionTokens = request.MaxTokens;
            openAIRequest.MaxTokens = null;
        }
        else if (openAIRequest.Model.StartsWith("o3-mini") || openAIRequest.Model.StartsWith("o4-mini"))
        {
            openAIRequest.MaxCompletionTokens = request.MaxTokens;
            openAIRequest.MaxTokens = null;
            openAIRequest.Temperature = null;
        }

        // Call the OpenAI service
        var openAIResponse =
            await GetChatCompletionService().CompleteChatAsync(aiModelDescribe, openAIRequest, cancellationToken);
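The same token-limit remapping appears again in the streaming conversion path in the next hunk, so a shared helper could centralize it. The sketch below is hypothetical and not part of this commit; the request type is a stand-in, since the diff does not show the concrete type returned by ConvertAnthropicToOpenAi, and the property names simply mirror the code above.

// Hypothetical helper sketch, not part of this commit.
public sealed class OpenAiRequestSketch
{
    public string Model { get; set; } = string.Empty;
    public int? MaxTokens { get; set; }
    public int? MaxCompletionTokens { get; set; }
    public double? Temperature { get; set; }
}

public static class ReasoningModelLimitSketch
{
    public static void Apply(OpenAiRequestSketch openAIRequest, int? maxTokens)
    {
        if (openAIRequest.Model.StartsWith("gpt-5"))
        {
            // These models take the limit as MaxCompletionTokens instead of MaxTokens.
            openAIRequest.MaxCompletionTokens = maxTokens;
            openAIRequest.MaxTokens = null;
        }
        else if (openAIRequest.Model.StartsWith("o3-mini") || openAIRequest.Model.StartsWith("o4-mini"))
        {
            openAIRequest.MaxCompletionTokens = maxTokens;
            openAIRequest.MaxTokens = null;
            // The diff also clears Temperature for these models.
            openAIRequest.Temperature = null;
        }
    }
}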
@@ -55,19 +43,7 @@ public class CustomOpenAIAnthropicChatCompletionsService(
    {
        var openAIRequest = AnthropicToOpenAi.ConvertAnthropicToOpenAi(request);
        openAIRequest.Stream = true;

        if (openAIRequest.Model.StartsWith("gpt-5"))
        {
            openAIRequest.MaxCompletionTokens = request.MaxTokens;
            openAIRequest.MaxTokens = null;
        }
        else if (openAIRequest.Model.StartsWith("o3-mini") || openAIRequest.Model.StartsWith("o4-mini"))
        {
            openAIRequest.MaxCompletionTokens = request.MaxTokens;
            openAIRequest.MaxTokens = null;
            openAIRequest.Temperature = null;
        }

        var messageId = Guid.NewGuid().ToString();
        var hasStarted = false;
        var hasTextContentBlockStarted = false;
@@ -130,7 +130,7 @@ public sealed class OpenAiChatCompletionsService(ILogger<OpenAiChatCompletionsSe
        using var openai =
            Activity.Current?.Source.StartActivity("OpenAI 对话补全");

-       var response = await HttpClientFactory.GetHttpClient(options.Endpoint).PostJsonAsync(
+       var response = await httpClientFactory.CreateClient().PostJsonAsync(
            options?.Endpoint.TrimEnd('/') + "/chat/completions",
            chatCompletionCreate, options.ApiKey).ConfigureAwait(false);
@@ -0,0 +1,123 @@
using System.Diagnostics;
using System.Net;
using System.Net.Http.Json;
using System.Runtime.CompilerServices;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Yi.Framework.AiHub.Domain.AiGateWay.Exceptions;
using Yi.Framework.AiHub.Domain.Shared.Dtos;
using Yi.Framework.AiHub.Domain.Shared.Dtos.OpenAi;
using Yi.Framework.AiHub.Domain.Shared.Dtos.OpenAi.Responses;

namespace Yi.Framework.AiHub.Domain.AiGateWay.Impl.ThorCustomOpenAI.Chats;

public class OpenAiResponseService(ILogger<OpenAiResponseService> logger, IHttpClientFactory httpClientFactory) : IOpenAiResponseService
{
    public async IAsyncEnumerable<(string, dynamic?)> ResponsesStreamAsync(AiModelDescribe options, OpenAiResponsesInput input,
        [EnumeratorCancellation] CancellationToken cancellationToken)
    {
        using var openai =
            Activity.Current?.Source.StartActivity("OpenAi 响应");

        var client = httpClientFactory.CreateClient();

        var response = await client.HttpRequestRaw(options.Endpoint.TrimEnd('/') + "/responses", input, options.ApiKey);

        openai?.SetTag("Model", input.Model);
        openai?.SetTag("Response", response.StatusCode.ToString());

        // Any status code >= 400 is treated as an error
        if (response.StatusCode >= HttpStatusCode.BadRequest)
        {
            var error = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false);
            logger.LogError("OpenAI响应异常 请求地址:{Address}, StatusCode: {StatusCode} Response: {Response}",
                options.Endpoint,
                response.StatusCode, error);

            throw new Exception("OpenAI响应异常" + response.StatusCode);
        }

        using StreamReader reader = new(await response.Content.ReadAsStreamAsync(cancellationToken));
        string? line = string.Empty;

        string? data = null;
        string eventType = string.Empty;

        while ((line = await reader.ReadLineAsync(cancellationToken).ConfigureAwait(false)) != null)
        {
            line += Environment.NewLine;

            if (line.StartsWith('{'))
            {
                logger.LogInformation("OpenAI响应异常 , StatusCode: {StatusCode} Response: {Response}", response.StatusCode,
                    line);

                throw new Exception("OpenAI响应异常" + line);
            }

            if (string.IsNullOrWhiteSpace(line))
            {
                continue;
            }

            if (line.StartsWith("event:"))
            {
                eventType = line;
                continue;
            }

            if (!line.StartsWith(OpenAIConstant.Data)) continue;

            data = line[OpenAIConstant.Data.Length..].Trim();

            var result = JsonSerializer.Deserialize<dynamic>(data,
                ThorJsonSerializer.DefaultOptions);

            yield return (eventType, result);
        }
    }

    public async Task<OpenAiResponsesOutput> ResponsesAsync(AiModelDescribe options, OpenAiResponsesInput chatCompletionCreate,
        CancellationToken cancellationToken)
    {
        using var openai =
            Activity.Current?.Source.StartActivity("OpenAI 响应");

        var response = await httpClientFactory.CreateClient().PostJsonAsync(
            options?.Endpoint.TrimEnd('/') + "/responses",
            chatCompletionCreate, options.ApiKey).ConfigureAwait(false);

        openai?.SetTag("Model", chatCompletionCreate.Model);
        openai?.SetTag("Response", response.StatusCode.ToString());

        if (response.StatusCode == HttpStatusCode.Unauthorized)
        {
            throw new BusinessException("渠道未登录,请联系管理人员", "401");
        }

        // Throw a rate-limit exception when the request is throttled
        if (response.StatusCode == HttpStatusCode.TooManyRequests)
        {
            throw new ThorRateLimitException();
        }

        // Any status code >= 400 is treated as an error
        if (response.StatusCode >= HttpStatusCode.BadRequest)
        {
            var error = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false);
            logger.LogError("OpenAI 响应异常 请求地址:{Address}, StatusCode: {StatusCode} Response: {Response}", options.Endpoint,
                response.StatusCode, error);

            throw new BusinessException("OpenAI响应异常", response.StatusCode.ToString());
        }

        var result =
            await response.Content.ReadFromJsonAsync<OpenAiResponsesOutput>(
                cancellationToken: cancellationToken).ConfigureAwait(false);

        return result;
    }
}
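The read loop above parses standard SSE framing: an "event:" line naming the event type, a "data:" line carrying JSON, and a blank line between frames. Note that the whole "event: ..." line (prefix included, plus the appended newline) is kept as the tuple's first element, which is why the manager below matches it with Contains rather than equality. A self-contained illustration follows; the event names and payloads are assumed examples in the style of the OpenAI Responses streaming format, not taken from this commit.

// Illustrative sketch only; mirrors the prefix handling above on hard-coded sample frames.
using System;
using System.IO;
using System.Text.Json;

class SseFramingSketch
{
    static void Main()
    {
        const string sample =
            "event: response.output_text.delta\n" +
            "data: {\"delta\":\"Hel\"}\n" +
            "\n" +
            "event: response.completed\n" +
            "data: {\"response\":{\"usage\":{\"input_tokens\":12,\"output_tokens\":3}}}\n" +
            "\n";

        var eventType = string.Empty;
        using var reader = new StringReader(sample);
        string? line;
        while ((line = reader.ReadLine()) != null)
        {
            if (string.IsNullOrWhiteSpace(line)) continue;
            if (line.StartsWith("event:")) { eventType = line; continue; }
            if (!line.StartsWith("data:")) continue;

            var data = line["data:".Length..].Trim();
            using var doc = JsonDocument.Parse(data);
            Console.WriteLine($"{eventType} -> {doc.RootElement}");
        }
    }
}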
@@ -7,6 +7,7 @@ using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
using Newtonsoft.Json.Serialization;
using Volo.Abp.Domain.Services;
using Yi.Framework.AiHub.Domain.AiGateWay;
@@ -18,6 +19,7 @@ using Yi.Framework.AiHub.Domain.Shared.Dtos.Anthropic;
using Yi.Framework.AiHub.Domain.Shared.Dtos.OpenAi;
using Yi.Framework.AiHub.Domain.Shared.Dtos.OpenAi.Embeddings;
using Yi.Framework.AiHub.Domain.Shared.Dtos.OpenAi.Images;
using Yi.Framework.AiHub.Domain.Shared.Dtos.OpenAi.Responses;
using Yi.Framework.AiHub.Domain.Shared.Enums;
using Yi.Framework.Core.Extensions;
using Yi.Framework.SqlSugarCore.Abstractions;
@@ -546,9 +548,9 @@ public class AiGateWayManager : DomainService
        var chatService =
            LazyServiceProvider.GetRequiredKeyedService<IAnthropicChatCompletionService>(modelDescribe.HandlerName);
        var data = await chatService.ChatCompletionsAsync(modelDescribe, request, cancellationToken);

        data.SupplementalMultiplier(modelDescribe.Multiplier);

        if (userId is not null)
        {
            await _aiMessageManager.CreateUserMessageAsync(userId.Value, sessionId,
@@ -660,6 +662,183 @@ public class AiGateWayManager : DomainService
        }
    }

    /// <summary>
    /// OpenAi responses - non-streaming - with caching/statistics handling
    /// </summary>
    /// <param name="httpContext"></param>
    /// <param name="request"></param>
    /// <param name="userId"></param>
    /// <param name="sessionId"></param>
    /// <param name="tokenId"></param>
    /// <param name="cancellationToken"></param>
    public async Task OpenAiResponsesAsyncForStatisticsAsync(HttpContext httpContext,
        OpenAiResponsesInput request,
        Guid? userId = null,
        Guid? sessionId = null,
        Guid? tokenId = null,
        CancellationToken cancellationToken = default)
    {
        //todo 1
        // _specialCompatible.AnthropicCompatible(request);
        var response = httpContext.Response;
        // Set the response header to declare JSON
        //response.ContentType = "application/json; charset=UTF-8";
        var modelDescribe = await GetModelAsync(ModelApiTypeEnum.Response, request.Model);

        var chatService =
            LazyServiceProvider.GetRequiredKeyedService<IOpenAiResponseService>(modelDescribe.HandlerName);
        var data = await chatService.ResponsesAsync(modelDescribe, request, cancellationToken);

        //todo 2
        // data.SupplementalMultiplier(modelDescribe.Multiplier);

        //todo 3
        // if (userId is not null)
        // {
        //     await _aiMessageManager.CreateUserMessageAsync(userId.Value, sessionId,
        //         new MessageInputDto
        //         {
        //             Content = sessionId is null ? "不予存储" : request.Messages?.FirstOrDefault()?.Content ?? string.Empty,
        //             ModelId = request.Model,
        //             TokenUsage = data.TokenUsage,
        //         }, tokenId);
        //
        //     await _aiMessageManager.CreateSystemMessageAsync(userId.Value, sessionId,
        //         new MessageInputDto
        //         {
        //             Content = sessionId is null ? "不予存储" : data.content?.FirstOrDefault()?.text,
        //             ModelId = request.Model,
        //             TokenUsage = data.TokenUsage
        //         }, tokenId);
        //
        //     await _usageStatisticsManager.SetUsageAsync(userId.Value, request.Model, data.TokenUsage, tokenId);
        //
        //     // Deduct usage from the premium token package
        //     var totalTokens = data.TokenUsage.TotalTokens ?? 0;
        //     if (totalTokens > 0)
        //     {
        //         await PremiumPackageManager.TryConsumeTokensAsync(userId.Value, totalTokens);
        //     }
        // }

        await response.WriteAsJsonAsync(data, cancellationToken);
    }

    /// <summary>
    /// OpenAi responses - streaming
    /// </summary>
    /// <param name="request"></param>
    /// <param name="cancellationToken"></param>
    /// <returns></returns>
    public async IAsyncEnumerable<(string, dynamic?)> OpenAiResponsesAsync(
        OpenAiResponsesInput request,
        [EnumeratorCancellation] CancellationToken cancellationToken)
    {
        //todo cc
        // _specialCompatible.AnthropicCompatible(request);
        var modelDescribe = await GetModelAsync(ModelApiTypeEnum.Response, request.Model);
        var chatService =
            LazyServiceProvider.GetRequiredKeyedService<IOpenAiResponseService>(modelDescribe.HandlerName);

        await foreach (var result in chatService.ResponsesStreamAsync(modelDescribe, request, cancellationToken))
        {
            //todo multiplier
            // result.Item2.SupplementalMultiplier(modelDescribe.Multiplier);
            yield return result;
        }
    }

    /// <summary>
    /// OpenAi responses - streaming - with caching/statistics handling
    /// </summary>
    /// <param name="httpContext"></param>
    /// <param name="request"></param>
    /// <param name="userId"></param>
    /// <param name="sessionId"></param>
    /// <param name="tokenId">Token Id (the web client passes null or Guid.Empty)</param>
    /// <param name="cancellationToken"></param>
    /// <returns></returns>
    public async Task OpenAiResponsesStreamForStatisticsAsync(
        HttpContext httpContext,
        OpenAiResponsesInput request,
        Guid? userId = null,
        Guid? sessionId = null,
        Guid? tokenId = null,
        CancellationToken cancellationToken = default)
    {
        var response = httpContext.Response;
        // Set the response headers to declare an SSE stream
        response.ContentType = "text/event-stream;charset=utf-8;";
        response.Headers.TryAdd("Cache-Control", "no-cache");
        response.Headers.TryAdd("Connection", "keep-alive");

        var completeChatResponse = OpenAiResponsesAsync(request, cancellationToken);
        ThorUsageResponse? tokenUsage = null;
        try
        {
            await foreach (var responseResult in completeChatResponse)
            {
                // Fallback safeguard: read token usage from the response.completed event
                if (responseResult.Item1.Contains("response.completed"))
                {
                    JObject obj = JObject.FromObject(responseResult.Item2);
                    int inputTokens = (int?)obj["response"]?["usage"]?["input_tokens"] ?? 0;
                    int outputTokens = (int?)obj["response"]?["usage"]?["output_tokens"] ?? 0;
                    tokenUsage = new ThorUsageResponse
                    {
                        PromptTokens = inputTokens,
                        InputTokens = inputTokens,
                        OutputTokens = outputTokens,
                        CompletionTokens = outputTokens,
                        TotalTokens = inputTokens + outputTokens,
                    };
                }

                await WriteAsEventStreamDataAsync(httpContext, responseResult.Item1, responseResult.Item2,
                    cancellationToken);
            }
        }
        catch (Exception e)
        {
            _logger.LogError(e, "Ai响应异常");
            var errorContent = $"响应Ai异常,异常信息:\n当前Ai模型:{request.Model}\n异常信息:{e.Message}\n异常堆栈:{e}";
            throw new UserFriendlyException(errorContent);
        }

        await _aiMessageManager.CreateUserMessageAsync(userId, sessionId,
            new MessageInputDto
            {
                Content = "不予存储",
                ModelId = request.Model,
                TokenUsage = tokenUsage,
            }, tokenId);

        await _aiMessageManager.CreateSystemMessageAsync(userId, sessionId,
            new MessageInputDto
            {
                Content = "不予存储",
                ModelId = request.Model,
                TokenUsage = tokenUsage
            }, tokenId);

        await _usageStatisticsManager.SetUsageAsync(userId, request.Model, tokenUsage, tokenId);

        // Deduct usage from the premium token package
        if (userId.HasValue && tokenUsage is not null)
        {
            var totalTokens = tokenUsage.TotalTokens ?? 0;
            if (totalTokens > 0)
            {
                await PremiumPackageManager.TryConsumeTokensAsync(userId.Value, totalTokens);
            }
        }
    }

    #region Anthropic-format HTTP responses

    private static readonly byte[] EventPrefix = "event: "u8.ToArray();
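The usage extraction in OpenAiResponsesStreamForStatisticsAsync assumes the response.completed event carries token counts under response.usage. A self-contained sketch of that assumption follows; the JSON is an illustrative example of the OpenAI Responses streaming payload with made-up numbers, not taken from this commit.

// Illustrative sketch only; mirrors the JObject usage extraction above on a sample payload.
using System;
using Newtonsoft.Json.Linq;

class ResponseCompletedUsageSketch
{
    static void Main()
    {
        var obj = JObject.Parse("""
            {
              "type": "response.completed",
              "response": {
                "usage": { "input_tokens": 128, "output_tokens": 42, "total_tokens": 170 }
              }
            }
            """);

        int inputTokens = (int?)obj["response"]?["usage"]?["input_tokens"] ?? 0;
        int outputTokens = (int?)obj["response"]?["usage"]?["output_tokens"] ?? 0;

        Console.WriteLine($"prompt={inputTokens}, completion={outputTokens}, total={inputTokens + outputTokens}");
    }
}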
@@ -675,7 +854,6 @@ public class AiGateWayManager : DomainService
        string @event,
        T value,
        CancellationToken cancellationToken = default)
        where T : class
    {
        var response = context.Response;
        var bodyStream = response.Body;
@@ -56,6 +56,12 @@ namespace Yi.Framework.AiHub.Domain
        #endregion

        #region OpenAi Response

        services.AddKeyedTransient<IOpenAiResponseService, OpenAiResponseService>(
            nameof(OpenAiResponseService));

        #endregion

        #region Image
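The handler is registered under the key nameof(OpenAiResponseService), and AiGateWayManager resolves it with GetRequiredKeyedService<IOpenAiResponseService>(modelDescribe.HandlerName), so a model's HandlerName must be the string "OpenAiResponseService" for the new Responses path to select this implementation. A minimal, hypothetical resolution sketch (not part of this commit; AddLogging/AddHttpClient are only there to satisfy the service's constructor dependencies):

// Hypothetical wiring sketch, not part of this commit.
using System;
using Microsoft.Extensions.DependencyInjection;
using Yi.Framework.AiHub.Domain.AiGateWay;
using Yi.Framework.AiHub.Domain.AiGateWay.Impl.ThorCustomOpenAI.Chats;

var services = new ServiceCollection();
services.AddLogging();      // ILogger<OpenAiResponseService>
services.AddHttpClient();   // IHttpClientFactory
services.AddKeyedTransient<IOpenAiResponseService, OpenAiResponseService>(
    nameof(OpenAiResponseService));

using var provider = services.BuildServiceProvider();

// Equivalent to the manager's lookup with modelDescribe.HandlerName as the key.
var handler = provider.GetRequiredKeyedService<IOpenAiResponseService>(nameof(OpenAiResponseService));
Console.WriteLine(handler.GetType().Name); // OpenAiResponseService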