2025-12-08 14:29:20 +08:00
parent bcef28cae3
commit 809bf4e373
5 changed files with 249 additions and 2581 deletions

View File

@@ -0,0 +1,14 @@
import { BaseChat, BaseChatOptions } from '../core/chat.ts';
export type KevisualOptions = Partial<BaseChatOptions>;
/**
 * Kevisual Chat Adapter
 */
export class Kevisual extends BaseChat {
  static BASE_URL = 'https://newapi.kevisual.cn/v1/';
  constructor(options: KevisualOptions) {
    const baseURL = options.baseURL || Kevisual.BASE_URL;
    super({ ...(options as BaseChatOptions), baseURL: baseURL });
  }
}
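Since KevisualOptions is just Partial<BaseChatOptions>, the adapter accepts whatever subset of the base options the caller provides and only fills in the default baseURL. A minimal usage sketch, assuming BaseChatOptions carries fields such as apiKey and model (those names are not confirmed by this diff):

import { Kevisual } from './chat-adapter/kevisual.ts';

const chat = new Kevisual({
  apiKey: process.env.KEVISUAL_API_KEY, // assumed option name
  model: 'qwen-plus',                   // assumed option name and model id
});
// With no baseURL given, the constructor falls back to Kevisual.BASE_URL.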

View File

@@ -10,6 +10,7 @@ import { ModelScope } from './chat-adapter/model-scope.ts';
import { BailianChat } from './chat-adapter/dashscope.ts';
import { Zhipu } from './chat-adapter/zhipu.ts';
import { Kimi } from './chat-adapter/kimi.ts';
import { Kevisual } from './chat-adapter/kevisual.ts';
import { ChatMessage } from './core/type.ts';
@@ -25,16 +26,19 @@ export {
  Zhipu,
  Kimi,
  ChatMessage,
  Kevisual,
}
export const OllamaProvider = Ollama;
export const SiliconFlowProvider = SiliconFlow;
export const CustomProvider = Custom;
export const VolcesProvider = Volces;
export const DeepSeekProvider = DeepSeek;
export const ModelScopeProvider = ModelScope;
export const BailianProvider = BailianChat;
export const ZhipuProvider = Zhipu;
export const KimiProvider = Kimi;
export class OllamaProvider extends Ollama { }
export class SiliconFlowProvider extends SiliconFlow { }
export class CustomProvider extends Custom { }
export class VolcesProvider extends Volces { }
export class DeepSeekProvider extends DeepSeek { }
export class ModelScopeProvider extends ModelScope { }
export class BailianProvider extends BailianChat { }
export class ZhipuProvider extends Zhipu { }
export class KimiProvider extends Kimi { }
export class KevisualProvider extends Kevisual { }
export const ChatProviderMap = {
  Ollama: OllamaProvider,
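The provider exports switch from const aliases to thin subclasses, so each *Provider is a distinct class (with its own name in stack traces, usable with instanceof) rather than a second binding to the same constructor. A minimal lookup sketch against ChatProviderMap, assuming the remaining keys follow the Ollama entry shown here and that providers accept the base chat options:

import { ChatProviderMap, OllamaProvider } from './index.ts'; // hypothetical path to this module

// Resolve a provider class by name at runtime; 'Ollama' is the only key visible in this hunk.
const ProviderClass = ChatProviderMap['Ollama'];
const client = new ProviderClass({ baseURL: 'http://localhost:11434/v1/' }); // assumed option
console.log(client instanceof OllamaProvider); // true — the subclass keeps its own identity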

View File

@@ -1,34 +0,0 @@
import { encoding_for_model, get_encoding } from 'tiktoken';
const MODEL_TO_ENCODING = {
  'gpt-4': 'cl100k_base',
  'gpt-4-turbo': 'cl100k_base',
  'gpt-3.5-turbo': 'cl100k_base',
  'text-embedding-ada-002': 'cl100k_base',
  'text-davinci-002': 'p50k_base',
  'text-davinci-003': 'p50k_base',
} as const;
export function numTokensFromString(text: string, model: keyof typeof MODEL_TO_ENCODING = 'gpt-3.5-turbo'): number {
  try {
    // Use the encoder dedicated to the given model
    const encoder = encoding_for_model(model);
    const tokens = encoder.encode(text);
    const tokenCount = tokens.length;
    encoder.free(); // Release the encoder
    return tokenCount;
  } catch (error) {
    try {
      // If the model-specific encoder fails, fall back to the base encoding
      const encoder = get_encoding(MODEL_TO_ENCODING[model]);
      const tokens = encoder.encode(text);
      const tokenCount = tokens.length;
      encoder.free(); // Release the encoder
      return tokenCount;
    } catch (error) {
      // If encoding fails entirely, use a rough estimate: about 0.25 tokens per character
      return Math.ceil(text.length * 0.25);
    }
  }
}
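For reference, the removed helper took a model key from MODEL_TO_ENCODING and returned a token count, degrading from a model-specific encoder to the mapped base encoding and finally to a character-based estimate. A minimal usage sketch (assumes the tiktoken package is installed; the import path is hypothetical):

import { numTokensFromString } from './utils/token.ts'; // hypothetical path to the removed file

const prompt = 'Explain tokenization in one sentence.';
const tokens = numTokensFromString(prompt, 'gpt-4');
// If both tiktoken encoders fail, this returns Math.ceil(prompt.length * 0.25).
console.log(`prompt uses ~${tokens} tokens`);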