using System.Text;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
using Newtonsoft.Json;
using Newtonsoft.Json.Serialization;
using OpenAI.Chat;
using Volo.Abp.Application.Services;
using Yi.Framework.AiHub.Application.Contracts.Dtos;
using Yi.Framework.AiHub.Application.Contracts.Options;
using Yi.Framework.AiHub.Domain.Managers;

namespace Yi.Framework.AiHub.Application.Services;

/// <summary>
/// AI 网关应用服务:exposes the configured direct-connection chat models and
/// streams chat completions back to the client as Server-Sent Events (SSE).
/// </summary>
public class AiService : ApplicationService
{
    // The serializer settings are stateless and thread-safe; reuse a single
    // instance instead of allocating settings + resolver for every streamed chunk.
    private static readonly JsonSerializerSettings CamelCaseSettings = new()
    {
        ContractResolver = new CamelCasePropertyNamesContractResolver()
    };

    private readonly AiGateWayOptions _options;
    private readonly IHttpContextAccessor _httpContextAccessor;

    public AiService(IOptions<AiGateWayOptions> options, IHttpContextAccessor httpContextAccessor)
    {
        _options = options.Value;
        _httpContextAccessor = httpContextAccessor;
    }

    /// <summary>
    /// 获取模型列表 — lists every model id configured under the chat gateways.
    /// </summary>
    /// <returns>One <see cref="ModelGetListOutput"/> per configured model id.</returns>
    public Task<List<ModelGetListOutput>> GetModelAsync()
    {
        // No awaited work here, so return a completed task instead of using an
        // async method with no await (the original triggered warning CS1998).
        var output = _options.Chats
            .SelectMany(x => x.Value.ModelIds)
            .Select(modelId => new ModelGetListOutput
            {
                Id = 1,
                Category = "chat",
                ModelName = modelId,
                ModelDescribe = "这是一个直连模型",
                ModelPrice = 4,
                ModelType = "1",
                ModelShow = "0",
                Remark = "直连模型"
            })
            .ToList();

        return Task.FromResult(output);
    }

    /// <summary>
    /// 发送消息 — forwards the conversation to the AI gateway and streams the
    /// completion chunks to the caller over an SSE response.
    /// </summary>
    /// <param name="input">Target model id plus the conversation history.</param>
    /// <param name="cancellationToken">Cancels the upstream call and the stream flushes.</param>
    public async Task PostSendAsync(SendMessageInput input, CancellationToken cancellationToken)
    {
        // HttpContext is nullable on IHttpContextAccessor; fail fast with a clear
        // message instead of a NullReferenceException when called outside a request.
        var httpContext = _httpContextAccessor.HttpContext
                          ?? throw new InvalidOperationException("PostSendAsync requires an active HTTP request context.");
        var response = httpContext.Response;

        // 设置响应头,声明是 SSE 流
        response.ContentType = "text/event-stream";
        response.Headers.Append("Cache-Control", "no-cache");
        response.Headers.Append("Connection", "keep-alive");

        // Rebuild the chat history; roles other than "ai"/"user" are skipped on purpose.
        var history = new List<ChatMessage>();
        foreach (var aiChatContextDto in input.Messages)
        {
            switch (aiChatContextDto.Role)
            {
                case "ai":
                    history.Add(ChatMessage.CreateAssistantMessage(aiChatContextDto.Content));
                    break;
                case "user":
                    history.Add(ChatMessage.CreateUserMessage(aiChatContextDto.Content));
                    break;
            }
        }

        var gateWay = LazyServiceProvider.GetRequiredService<AiGateWayManager>();
        var completeChatResponse = gateWay.CompleteChatAsync(input.Model, history, cancellationToken);

        await using var writer = new StreamWriter(response.Body, Encoding.UTF8, leaveOpen: true);
        await foreach (var data in completeChatResponse)
        {
            var model = MapToMessage(input.Model, data);
            var message = JsonConvert.SerializeObject(model, CamelCaseSettings);

            // "data: ...\n" plus WriteLine's own newline yields the blank line
            // that terminates an SSE event.
            await writer.WriteLineAsync($"data: {message}\n");
            await writer.FlushAsync(cancellationToken); // 确保立即推送数据
        }

        // 断开连接 — terminal sentinel event telling the client the stream is done.
        await writer.WriteLineAsync($"data: done\n");
        await writer.FlushAsync(cancellationToken); // 确保立即推送数据
    }

    /// <summary>
    /// Wraps one raw completion chunk in an OpenAI-compatible
    /// <c>chat.completion.chunk</c> envelope for the SSE stream.
    /// </summary>
    /// <param name="modelId">Model id echoed back to the client.</param>
    /// <param name="content">Delta text produced by the gateway for this chunk.</param>
    private SendMessageOutputDto MapToMessage(string modelId, string content)
    {
        var output = new SendMessageOutputDto
        {
            Id = 1,
            Object = "chat.completion.chunk",
            // Real creation time (the original emitted a stale hard-coded epoch value).
            Created = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
            Model = modelId,
            Choices = new()
            {
                new Choice
                {
                    Index = 0,
                    Delta = new Delta
                    {
                        Content = content,
                        Role = "assistant"
                    },
                    FinishReason = null,
                    // All filters are reported as "not filtered": the gateway does
                    // not run content moderation, this only satisfies the schema.
                    ContentFilterResults = new()
                    {
                        Hate = new() { Filtered = false, Detected = null },
                        SelfHarm = new() { Filtered = false, Detected = null },
                        Sexual = new() { Filtered = false, Detected = null },
                        Violence = new() { Filtered = false, Detected = null },
                        Jailbreak = new() { Filtered = false, Detected = false },
                        Profanity = new() { Filtered = false, Detected = false },
                    }
                }
            },
            SystemFingerprint = "",
            // NOTE(review): token counts are placeholders — the gateway does not
            // report usage per chunk. TODO: surface real counts when available.
            Usage = new Usage
            {
                PromptTokens = 75,
                CompletionTokens = 25,
                TotalTokens = 100,
                PromptTokensDetails = new()
                {
                    AudioTokens = 0,
                    CachedTokens = 0
                },
                CompletionTokensDetails = new()
                {
                    AudioTokens = 0,
                    ReasoningTokens = 0,
                    AcceptedPredictionTokens = 0,
                    RejectedPredictionTokens = 0
                }
            }
        };

        return output;
    }
}