feat: center change

This commit is contained in:
2025-04-06 23:25:46 +08:00
parent bfe8463212
commit 226f9a6896
23 changed files with 521 additions and 119 deletions

37
src/logger/index.ts Normal file
View File

@@ -0,0 +1,37 @@
// Application-wide structured logger built on pino.
import { pino } from 'pino';
import { useConfig } from '@kevisual/use-config/env';
// Environment-backed configuration; supplies LOG_LEVEL.
const config = useConfig();
/**
 * Shared pino logger instance.
 * - Level comes from config.LOG_LEVEL, defaulting to 'info'.
 * - Pretty-printed, colorized output with readable timestamps via pino-pretty.
 * - error/req/res payloads go through pino's standard serializers.
 * - Every record is tagged with the app name and NODE_ENV.
 */
export const logger = pino({
  level: config.LOG_LEVEL || 'info',
  transport: {
    target: 'pino-pretty',
    options: {
      colorize: true,
      translateTime: 'SYS:standard',
      // Drop noisy default fields from pretty output.
      ignore: 'pid,hostname',
    },
  },
  serializers: {
    error: pino.stdSerializers.err,
    req: pino.stdSerializers.req,
    res: pino.stdSerializers.res,
  },
  base: {
    app: 'ai-chat',
    env: process.env.NODE_ENV || 'development',
  },
});
// Convenience wrappers: log `message` with an optional structured payload
// nested under the `data` key.
export const logError = (message: string, data?: any) => logger.error({ data }, message);
export const logWarning = (message: string, data?: any) => logger.warn({ data }, message);
export const logInfo = (message: string, data?: any) => logger.info({ data }, message);
export const logDebug = (message: string, data?: any) => logger.debug({ data }, message);
// Grouped helpers so call sites can use `log.info(...)`, `log.error(...)`, etc.
export const log = {
  error: logError,
  warn: logWarning,
  info: logInfo,
  debug: logDebug,
};

View File

@@ -7,6 +7,7 @@ export type OllamaOptions = BaseChatOptions;
*/
export class Custom extends BaseChat {
constructor(options: OllamaOptions) {
super(options);
const baseURL = options.baseURL || 'https://api.deepseek.com/v1/';
super({ ...(options as BaseChatOptions), baseURL: baseURL });
}
}

View File

@@ -3,6 +3,7 @@ import { BaseChat, BaseChatOptions } from '../core/chat.ts';
export type DeepSeekOptions = Partial<BaseChatOptions>;
export class DeepSeek extends BaseChat {
constructor(options: DeepSeekOptions) {
super({ baseURL: 'https://api.deepseek.com/v1/', ...options } as any);
const baseURL = options.baseURL || 'https://api.deepseek.com/v1/';
super({ ...(options as BaseChatOptions), baseURL: baseURL });
}
}

View File

@@ -4,6 +4,7 @@ import { BaseChat, BaseChatOptions } from '../core/chat.ts';
export type ModelScopeOptions = Partial<BaseChatOptions>;
export class ModelScope extends BaseChat {
constructor(options: ModelScopeOptions) {
super({ baseURL: 'https://api-inference.modelscope.cn/v1/', ...options } as any);
const baseURL = options.baseURL || 'https://api-inference.modelscope.cn/v1/';
super({ ...options, baseURL: baseURL } as any);
}
}

View File

@@ -21,7 +21,8 @@ type OllamaModel = {
};
export class Ollama extends BaseChat {
constructor(options: OllamaOptions) {
super({ baseURL: 'http://localhost:11434/v1', ...(options as BaseChatOptions) });
const baseURL = options.baseURL || 'http://localhost:11434/v1';
super({ ...(options as BaseChatOptions), baseURL: baseURL });
}
async chat(messages: ChatMessage[], options?: ChatMessageOptions) {
const res = await super.chat(messages, options);

View File

@@ -25,7 +25,8 @@ type SiliconFlowUsageResponse = {
};
export class SiliconFlow extends BaseChat {
constructor(options: SiliconFlowOptions) {
super({ baseURL: 'https://api.siliconflow.com/v1', ...(options as BaseChatOptions) });
const baseURL = options.baseURL || 'https://api.siliconflow.com/v1';
super({ ...(options as BaseChatOptions), baseURL: baseURL });
}
async getUsageInfo(): Promise<SiliconFlowUsageResponse> {
return this.openai.get('/user/info');

View File

@@ -3,6 +3,7 @@ import { BaseChat, BaseChatOptions } from '../core/chat.ts';
export type VolcesOptions = Partial<BaseChatOptions>;
export class Volces extends BaseChat {
constructor(options: VolcesOptions) {
super({ baseURL: 'https://ark.cn-beijing.volces.com/api/v3/', ...options } as any);
const baseURL = options.baseURL || 'https://ark.cn-beijing.volces.com/api/v3/';
super({ ...(options as BaseChatOptions), baseURL: baseURL });
}
}

View File

@@ -1,6 +1,6 @@
import OpenAI from 'openai';
export type ChatMessage = OpenAI.Chat.Completions.ChatCompletionMessageParam;
export type ChatMessage = OpenAI.Chat.Completions.ChatCompletionMessageParam ;
export type ChatMessageOptions = Partial<OpenAI.Chat.Completions.ChatCompletionCreateParams>;
export type ChatMessageComplete = OpenAI.Chat.Completions.ChatCompletion;
export type ChatMessageStream = OpenAI.Chat.Completions.ChatCompletion;

View File

@@ -40,6 +40,8 @@ export class ProviderManager {
if (!Provider) {
throw new Error(`Provider ${provider} not found`);
}
console.log('pm', 'Provider', ProviderMap[provider]);
this.provider = new Provider({
model,
apiKey,

View File

@@ -0,0 +1,52 @@
import type { Permission } from '@kevisual/permission';
/** A single model entry offered under a provider/group. */
export type AIModel = {
  /**
   * Provider name
   */
  provider: string;
  /**
   * Model name
   */
  model: string;
  /**
   * Model group (ties this model to a SecretKey entry with the same group)
   */
  group: string;
  /**
   * Daily request-rate limit
   */
  dayLimit?: number;
  /**
   * Total token limit
   */
  tokenLimit?: number;
};
/** API credentials for a model group. */
export type SecretKey = {
  /**
   * Group this key belongs to
   */
  group: string;
  /**
   * API key (may be stored encrypted)
   */
  apiKey: string;
  /**
   * Decryption key for the API key, when it is encrypted
   */
  decryptKey?: string;
};
/** Top-level AI configuration document. */
export type AIConfig = {
  title?: string;
  description?: string;
  models: AIModel[];
  secretKeys: SecretKey[];
  permission?: Permission;
  // Optional post-processing filters applied to config objects.
  filter?: {
    objectKey: string;
    type: 'array' | 'object';
    operate: 'removeAttribute' | 'remove';
    attribute: string[];
  }[];
};

View File

@@ -88,7 +88,11 @@ export class AIConfigParser {
constructor(config: AIConfig) {
this.config = config;
}
/**
* 获取模型配置
* @param opts
* @returns
*/
getProvider(opts: GetProviderOpts): ProviderResult {
const { model, group, decryptKey } = opts;
const modelConfig = this.config.models.find((m) => m.model === model && m.group === group);
@@ -117,16 +121,17 @@ export class AIConfigParser {
this.result = mergeConfig;
return mergeConfig;
}
async getSecretKey({
getCache,
setCache,
providerResult,
}: {
/**
* 获取解密密钥
* @param opts
* @returns
*/
async getSecretKey(opts?: {
getCache?: (key: string) => Promise<string>;
setCache?: (key: string, value: string) => Promise<void>;
providerResult?: ProviderResult;
}) {
const { getCache, setCache, providerResult } = opts || {};
const { apiKey, decryptKey, group = '', model } = providerResult || this.result;
const cacheKey = `${group}--${model}`;
if (!decryptKey) {
@@ -144,11 +149,38 @@ export class AIConfigParser {
}
return secretKey;
}
/**
* 加密
* @param plainText
* @param secretKey
* @returns
*/
encrypt(plainText: string, secretKey: string) {
return encryptAES(plainText, secretKey);
}
/**
* 解密
* @param cipherText
* @param secretKey
* @returns
*/
decrypt(cipherText: string, secretKey: string) {
return decryptAES(cipherText, secretKey);
}
/**
* 获取模型配置
* @returns
*/
getSelectOpts() {
const { models, secretKeys = [] } = this.config;
return models.map((model) => {
const selectOpts = secretKeys.find((m) => m.group === model.group);
return {
...model,
...selectOpts,
};
});
}
}

View File

@@ -0,0 +1,43 @@
import { app } from '@/app.ts';
import { ChatConfigServices } from './services/chat-config-srevices.ts';
import { log } from '@/logger/index.ts';
import { ChatServices } from './services/chat-services.ts';
/**
 * Clear the cached chat config for the authenticated user.
 */
// https://localhost:4000/api/router?path=ai&key=clear-cache
app
  .route({
    path: 'ai',
    key: 'clear-cache',
    description: '清除缓存',
    middleware: ['auth'],
  })
  .define(async (ctx) => {
    const tokenUser = ctx.state.tokenUser;
    const username = tokenUser.username;
    // Owner and user are the same here: a user clears their own config cache.
    const services = new ChatConfigServices(username, username);
    await services.clearCache();
    log.info('清除缓存成功', { username });
    ctx.body = 'success';
  })
  .addTo(app);
// Clear the authenticated user's chat usage counters (day/token limits).
app
  .route({
    path: 'ai',
    key: 'clear-chat-limit',
    description: '清除chat使用情况',
    middleware: ['auth'],
  })
  .define(async (ctx) => {
    const tokenUser = ctx.state.tokenUser;
    const username = tokenUser.username;
    // Returns the cache content that was scheduled for expiry (may be null).
    const cache = await ChatServices.clearChatLimit(username);
    log.debug('清除chat使用情况成功', { username, cache });
    ctx.body = {
      cache,
    };
  })
  .addTo(app);

View File

@@ -3,6 +3,8 @@ import { ChatServices } from './services/chat-services.ts';
import { ChatConfigServices } from './services/chat-config-srevices.ts';
import { AiChatHistoryModel } from './models/ai-chat-history.ts';
import { UserPermission } from '@kevisual/permission';
import { AIConfigParser } from '@/provider/utils/parse-config.ts';
import { log } from '@/logger/index.ts';
app
.route({
path: 'ai',
@@ -11,10 +13,13 @@ app
})
.define(async (ctx) => {
const data = ctx.query.data || {};
const { id, messages = [], options = {}, title, hook, getFull = false } = data;
let { username, model, group } = ctx.query;
const { id, messages = [], title, type } = data;
const hook = data.data?.hook;
let { username, model, group, getFull = false } = ctx.query;
const tokenUser = ctx.state.tokenUser || {};
const tokenUsername = tokenUser.username;
const options = ctx.query.options || {};
let aiChatHistory: AiChatHistoryModel;
if (id) {
aiChatHistory = await AiChatHistoryModel.findByPk(id);
@@ -58,46 +63,65 @@ app
if (pickMessages.length === 0) {
ctx.throw(400, 'chat messages is empty');
}
const res = await chatServices.chat(pickMessages, options);
if (!aiChatHistory) {
aiChatHistory = await AiChatHistoryModel.create({
username,
model,
group,
title,
type: type || 'keep',
});
if (!title) {
// TODO: 创建标题
}
}
const message = res.choices[0].message;
const newMessage = await chatServices.createNewMessage([...messages, message]);
const usage = chatServices.chatProvider.getChatUsage();
await chatServices.updateChatLimit(usage.total_tokens);
await chatConfigServices.updateUserChatLimit(tokenUsername, usage.total_tokens);
const needUpdateData: any = {
messages: newMessage,
prompt_tokens: aiChatHistory.prompt_tokens + usage.prompt_tokens,
completion_tokens: aiChatHistory.completion_tokens + usage.completion_tokens,
total_tokens: aiChatHistory.total_tokens + usage.total_tokens,
version: aiChatHistory.version + 1,
model: model,
group: group,
username: username,
};
if (hook) {
needUpdateData.data = {
...aiChatHistory.data,
hook,
};
let message;
try {
const res = await chatServices.chat(pickMessages, options);
message = res.choices[0].message;
} catch (error) {
log.error('chat error', {
errorMessage: error.message,
});
ctx.throw(500, error.message);
}
try {
const newMessage = await chatServices.createNewMessage([...messages, message]);
const usage = chatServices.chatProvider.getChatUsage();
await chatServices.updateChatLimit(usage.total_tokens);
await chatConfigServices.updateUserChatLimit(tokenUsername, usage.total_tokens);
const needUpdateData: any = {
messages: newMessage,
prompt_tokens: aiChatHistory.prompt_tokens + usage.prompt_tokens,
completion_tokens: aiChatHistory.completion_tokens + usage.completion_tokens,
total_tokens: aiChatHistory.total_tokens + usage.total_tokens,
version: aiChatHistory.version + 1,
model: model,
group: group,
username: username,
};
if (hook) {
needUpdateData.data = {
...aiChatHistory.data,
hook,
};
}
if (type) {
needUpdateData.type = type;
}
await AiChatHistoryModel.update(needUpdateData, { where: { id: aiChatHistory.id } });
ctx.body = {
message: newMessage[newMessage.length - 1],
updatedAt: aiChatHistory.updatedAt,
version: aiChatHistory.version,
aiChatHistory: getFull || !id ? aiChatHistory : undefined,
};
} catch (error) {
console.error('create new message error', error);
ctx.throw(500, error.message);
}
await AiChatHistoryModel.update(needUpdateData, { where: { id: aiChatHistory.id } });
ctx.body = {
message: newMessage[newMessage.length - 1],
aiChatHistory: getFull || !id ? aiChatHistory : undefined,
};
})
.addTo(app);
@@ -136,50 +160,69 @@ app
path: 'ai',
key: 'get-model-list',
middleware: ['auth'],
description: '获取模型列表',
isDebug: true,
})
.define(async (ctx) => {
const username = ctx.query.username || 'root';
const tokenUser = ctx.state.tokenUser;
const usernames = ctx.query.data?.usernames || [];
const keepSecret = ctx.query.keepSecret || false;
const tokenUsername = tokenUser.username;
const isSameUser = username === tokenUser.username;
const configArray: any[] = [];
const services = new ChatConfigServices(username, tokenUser.username);
const res = await services.getChatConfig(true, ctx.query.token);
configArray.push({
username,
config: res,
});
if (!isSameUser) {
const selfServices = new ChatConfigServices(tokenUser.username, tokenUser.username);
const selfRes = await selfServices.getChatConfig(true, ctx.query.token);
configArray.push({
username: tokenUser.username,
self: true,
config: selfRes,
});
}
for (const username of usernames) {
try {
const services = new ChatConfigServices(username, tokenUser.username);
const res = await services.getChatConfig(true, ctx.query.token);
const aiConfig = services.aiConfig;
const permission = new UserPermission({ permission: aiConfig.permission, owner: username });
const checkPermission = permission.checkPermissionSuccess({ username: tokenUsername, password: '-----------------' });
if (!checkPermission.success) {
// ctx.throw(403, `[${username}] ${checkPermission.message}`);
const res = await services.getChatConfig(services.isOwner && keepSecret, ctx.query.token);
const selectOpts = await services.getSelectOpts(res);
configArray.push({
username,
config: res,
selectOpts,
self: isSameUser,
});
if (!isSameUser) {
const selfServices = new ChatConfigServices(tokenUser.username, tokenUser.username);
const selfRes = await selfServices.getChatConfig(services.isOwner && keepSecret, ctx.query.token);
const selfSelectOpts = await selfServices.getSelectOpts(selfRes);
configArray.push({
username,
config: null,
error: checkPermission.message,
});
} else {
configArray.push({
username,
config: res,
username: tokenUser.username,
self: true,
config: selfRes,
selectOpts: selfSelectOpts,
});
}
for (const username of usernames) {
const services = new ChatConfigServices(username, tokenUser.username);
const res = await services.getChatConfig(services.isOwner && keepSecret, ctx.query.token);
const aiConfig = services.aiConfig;
const permission = new UserPermission({ permission: aiConfig.permission, owner: username });
const checkPermission = permission.checkPermissionSuccess({ username: tokenUsername, password: '-----------------' });
if (!checkPermission.success) {
configArray.push({
username,
config: null,
error: checkPermission.message,
});
} else {
const selectOpts = await services.getSelectOpts(res);
configArray.push({
username,
config: res,
selectOpts,
self: username === tokenUser.username,
});
}
}
ctx.body = { list: configArray };
} catch (error) {
log.error('get model list error', {
username,
errorMessage: error.message,
errorStack: error.stack,
});
ctx.throw(500, error.message);
}
ctx.body = configArray;
})
.addTo(app);

View File

@@ -10,9 +10,11 @@ app
})
.define(async (ctx) => {
const tokenUser = ctx.state.tokenUser;
const type = ctx.query.type || 'keep';
const aiChatList = await AiChatHistoryModel.findAll({
where: {
uid: tokenUser.id,
type,
},
order: [['updatedAt', 'DESC']],
});

View File

@@ -34,6 +34,7 @@ export class AiChatHistoryModel extends Model {
declare completion_tokens: number;
declare version: number;
declare type: string;
declare createdAt: Date;
declare updatedAt: Date;
@@ -87,6 +88,11 @@ AiChatHistoryModel.init(
type: DataTypes.JSONB,
defaultValue: {},
},
type: {
type: DataTypes.STRING,
allowNull: false,
defaultValue: 'keep', // keep 保留 temp 临时
},
version: {
type: DataTypes.INTEGER,
defaultValue: 0,

View File

@@ -1,7 +1,8 @@
import type { AIConfig } from '@/provider/utils/parse-config.ts';
import { AIConfigParser, type AIConfig } from '@/provider/utils/parse-config.ts';
import { redis } from '@/modules/db.ts';
import { CustomError } from '@kevisual/router';
import { queryConfig } from '@/modules/query.ts';
import { log } from '@/logger/index.ts';
export class ChatConfigServices {
cachePrefix = 'ai:chat:config';
// 使用谁的模型
@@ -9,7 +10,7 @@ export class ChatConfigServices {
// 使用者
username: string;
aiConfig?: AIConfig;
isOwner: boolean;
/**
* username 是使用的模型的用户名,使用谁的模型
* @param username
@@ -17,16 +18,17 @@ export class ChatConfigServices {
constructor(owner: string, username: string, token?: string) {
this.owner = owner;
this.username = username;
this.isOwner = owner === username;
}
getKey() {
return `${this.cachePrefix}:${this.owner}`;
}
/**
* 获取chat配置
* @param needClearSecret 是否需要清除secret 默认false
* @param keepSecret 是否需要清除secret 默认 不清除 为true
* @returns
*/
async getChatConfig(needClearSecret = false, token?: string) {
async getChatConfig(keepSecret = true, token?: string) {
const key = this.getKey();
const cache = await redis.get(key);
let modelConfig = null;
@@ -35,7 +37,9 @@ export class ChatConfigServices {
}
if (!modelConfig) {
if (this.owner !== this.username) {
throw new CustomError(`the owner [${this.owner}] config, [${this.username}] not permission to init config, only owner can init config, place connect owner`);
throw new CustomError(
`the owner [${this.owner}] config, [${this.username}] not permission to init config, only owner can init config, place connect owner`,
);
} else {
const res = await queryConfig.getConfigByKey('ai.json', { token });
if (res.code === 200 && res.data?.data) {
@@ -53,14 +57,26 @@ export class ChatConfigServices {
await redis.set(key, JSON.stringify(modelConfig), 'EX', cacheTime);
}
this.aiConfig = modelConfig;
if (needClearSecret) {
if (!keepSecret) {
modelConfig = this.filterApiKey(modelConfig);
}
return modelConfig;
}
async clearCache() {
const key = this.getKey();
await redis.set(key, JSON.stringify({}), 'EX', 1);
}
/**
* 获取模型配置
* @returns
*/
async getSelectOpts(config?: AIConfig) {
const aiConfigParser = new AIConfigParser(config || this.aiConfig);
return aiConfigParser.getSelectOpts();
}
async filterApiKey(chatConfig: AIConfig) {
// 过滤掉secret中的所有apiKey移除掉并返回chatConfig
const { secretKeys, ...rest } = chatConfig;
const { secretKeys = [], ...rest } = chatConfig;
return {
...rest,
secretKeys: secretKeys.map((item) => {
@@ -128,4 +144,9 @@ export class ChatConfigServices {
await redis.set(userCacheKey, JSON.stringify({ token }), 'EX', 60 * 60 * 24 * 30); // 30天
}
}
async clearChatLimit() {
if (this.owner !== 'root') return;
// const userCacheKey = `${this.cachePrefix}:root:chat-limit:${this.username}`;
// await redis.del(userCacheKey);
}
}

View File

@@ -7,6 +7,7 @@ import { pick } from 'lodash-es';
import { ChastHistoryMessage } from '../models/ai-chat-history.ts';
import { nanoid } from '@/utils/uuid.ts';
import dayjs from 'dayjs';
import { log } from '@/logger/index.ts';
export type ChatServicesConfig = {
owner: string;
@@ -78,22 +79,49 @@ export class ChatServices {
const owner = this.owner;
return `${this.cachePrefix}${owner}:${key}`;
}
static chatLimitKey(owner: string, key = 'chat-limit') {
return `ai-chat:model:${owner}:${key}`;
}
static async clearChatLimit(owner: string) {
const key = ChatServices.chatLimitKey(owner);
const cache = await redis.get(key);
if (cache) {
await redis.expire(key, 2);
}
return cache;
}
async getConfig(username: string) {
const services = new ChatConfigServices(this.owner, username);
return services.getChatConfig();
}
async chat(messages: ChatMessage[], options?: ChatMessageOptions) {
async chat(messages: ChatMessage[], options?: ChatMessageOptions, customOptions?: { clearThink?: boolean }) {
const { model, provider, apiKey, baseURL } = this.modelConfig;
const providerManager = await ProviderManager.createProvider({
provider: provider,
model: model,
apiKey: apiKey,
baseURL: baseURL,
});
this.chatProvider = providerManager;
const result = await providerManager.chat(messages, options);
return result;
try {
const providerManager = await ProviderManager.createProvider({
provider: provider,
model: model,
apiKey: apiKey,
baseURL: baseURL,
});
this.chatProvider = providerManager;
const result = await providerManager.chat(messages, options);
const { clearThink = true } = customOptions || {};
if (clearThink) {
result.choices[0].message.content = result.choices[0].message.content.replace(/<think>[\s\S]*?<\/think>/g, '');
}
return result;
} catch (error) {
log.error('chat error', {
errorMessage: error.message,
errorStack: error.stack,
provider,
model,
apiKey,
baseURL,
});
throw error;
}
}
async createTitle(messages: ChastHistoryMessage[]) {
return nanoid();
@@ -135,21 +163,31 @@ export class ChatServices {
const { modelConfig } = this;
const { tokenLimit, dayLimit, group, model } = modelConfig;
const key = this.wrapperKey(`chat-limit`);
const cache = await redis.get(key);
if (cache) {
const cacheData = JSON.parse(cache);
const today = dayjs().format('YYYY-MM-DD');
const current = cacheData.find((item) => item.group === group && item.model === model);
const day = current[today] || 0;
const token = current.token || 0;
if (tokenLimit && token >= tokenLimit) {
throw new CustomError(400, 'token limit exceeded');
}
if (dayLimit && day >= dayLimit) {
throw new CustomError(400, 'day limit exceeded');
try {
const cache = await redis.get(key);
if (cache) {
const cacheData = JSON.parse(cache);
const today = dayjs().format('YYYY-MM-DD');
log.debug('checkCanChat', { cacheData });
let current = cacheData.find((item) => item.group === group && item.model === model);
if (current) {
const day = current[today] || 0;
const token = current.token || 0;
if (tokenLimit && token >= tokenLimit) {
throw new CustomError(400, 'token limit exceeded');
}
if (dayLimit && day >= dayLimit) {
throw new CustomError(400, 'day limit exceeded');
}
}
}
return true;
} catch (error) {
console.error('checkCanChat error', error);
// 如果获取失败则设置一个空的缓存2秒后删除
await redis.set(key, '', 'EX', 2); // 2秒
throw new CustomError(500, 'checkCanChat error, please try again later');
}
return true;
}
/**
* 获取模型的使用情况
@@ -184,19 +222,27 @@ export class ChatServices {
const key = this.wrapperKey(`chat-limit`);
const cache = await redis.get(key);
const today = dayjs().format('YYYY-MM-DD');
if (cache) {
const cacheData = JSON.parse(cache);
const current = cacheData.find((item) => item.group === group && item.model === model);
if (current) {
const day = current[today] || 0;
current[today] = day + 1;
current.token = current.token + token;
try {
if (cache) {
const cacheData = JSON.parse(cache);
const current = cacheData.find((item) => item.group === group && item.model === model);
if (current) {
const day = current[today] || 0;
current[today] = day + 1;
current.token = current.token + token;
} else {
cacheData.push({ group, model, token: token, [today]: 1 });
}
await redis.set(key, JSON.stringify(cacheData), 'EX', 60 * 60 * 24 * 30); // 30天
} else {
cacheData.push({ group, model, token: token, [today]: 1 });
const cacheData = { group, model, token: token, [today]: 1 };
await redis.set(key, JSON.stringify([cacheData]), 'EX', 60 * 60 * 24 * 30); // 30天
}
await redis.set(key, JSON.stringify(cacheData), 'EX', 60 * 60 * 24 * 30); // 30天
} else {
await redis.set(key, JSON.stringify({ group, model, token: token, [today]: 1 }), 'EX', 60 * 60 * 24 * 30); // 30天
} catch (error) {
console.error('updateChatLimit error', error);
// 如果更新失败则设置一个空的缓存2秒后删除
await redis.set(key, '', 'EX', 2); // 2秒
throw new CustomError(500, 'updateChatLimit error, please try again later');
}
}
}

View File

@@ -1,2 +1,3 @@
import './ai-chat/index.ts';
import './ai-chat/list.ts';
import './ai-chat/cache.ts';

View File

@@ -0,0 +1,26 @@
// Manual smoke test for the ModelScope chat adapter (run directly, not in CI).
import { ModelScope } from '../../provider/chat-adapter/model-scope.ts';
import { logInfo } from '../../logger/index.ts';
// NOTE(review): 'util' appears unused in this script — confirm and remove.
import util from 'util';
import { config } from 'dotenv';
// Load MODEL_SCOPE_API_KEY from the local .env file.
config();
const chat = new ModelScope({
  apiKey: process.env.MODEL_SCOPE_API_KEY,
  model: 'Qwen/Qwen2.5-Coder-32B-Instruct',
});
// chat.chat([{ role: 'user', content: 'Hello, world! 1 + 1 equals ?' }]);
const chatMessage = [{ role: 'user', content: 'Hello, world! 1 + 1 equals ?' }];
// Connectivity check via the adapter's test() helper (currently disabled).
const main = async () => {
  const res = await chat.test();
  logInfo('test', res);
};
// main();
// Sends a single chat message and logs the raw response.
const mainChat = async () => {
  const res = await chat.chat(chatMessage as any);
  logInfo('chat', res);
};
mainChat();

View File

@@ -0,0 +1,6 @@
import { ProviderManager } from '../../provider/index.ts';
const providerConfig = { provider: 'ModelScope', model: 'Qwen/Qwen2.5-Coder-32B-Instruct', apiKey: 'a4cc0e94-3633-4374-85a6-06f455e17bea' };
const provider = await ProviderManager.createProvider(providerConfig);
const result = await provider.chat([{ role: 'user', content: '你好' }]);
console.log(result);