generated from tailored/app-template

init demo

.cursorignore | 2 lines (new file)

@@ -0,0 +1,2 @@
config.json
.env*

.gitignore | 7 lines (vendored)

@@ -11,4 +11,9 @@ cache-file

/apps

logs
logs

.env*
!.env.example

config.json

index.html | 14 lines (new file)

@@ -0,0 +1,14 @@
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Document</title>
</head>

<body>
  <script type="module" src="/src/page/main.ts"></script>
</body>

</html>

package.json | 12 lines

@@ -23,10 +23,12 @@
    "@kevisual/mark": "0.0.7",
    "@kevisual/router": "0.0.10",
    "cookie": "^1.0.2",
    "crypto-js": "^4.2.0",
    "dayjs": "^1.11.13",
    "formidable": "^3.5.2",
    "json5": "^2.2.3",
    "lodash-es": "^4.17.21"
    "lodash-es": "^4.17.21",
    "openai": "^4.91.1"
  },
  "devDependencies": {
    "@kevisual/types": "^0.0.6",
@@ -41,10 +43,14 @@
    "@types/formidable": "^3.4.5",
    "@types/lodash-es": "^4.17.12",
    "@types/node": "^22.14.0",
    "@vitejs/plugin-basic-ssl": "^2.0.0",
    "concurrently": "^9.1.2",
    "cross-env": "^7.0.3",
    "dotenv": "^16.4.7",
    "ioredis": "^5.6.0",
    "jsrepo": "^1.45.3",
    "nodemon": "^3.1.9",
    "pino": "^9.6.0",
    "pm2": "^6.0.5",
    "rimraf": "^6.0.1",
    "rollup": "^4.39.0",
@@ -53,7 +59,9 @@
    "rollup-plugin-esbuild": "^6.2.1",
    "sequelize": "^6.37.7",
    "tape": "^5.9.0",
    "tiktoken": "^1.0.20",
    "tsx": "^4.19.3",
    "typescript": "^5.8.2"
    "typescript": "^5.8.2",
    "vite": "^6.2.5"
  }
}

pnpm-lock.yaml | 178 lines (generated)

@@ -20,6 +20,9 @@ importers:
      cookie:
        specifier: ^1.0.2
        version: 1.0.2
      crypto-js:
        specifier: ^4.2.0
        version: 4.2.0
      dayjs:
        specifier: ^1.11.13
        version: 1.11.13
@@ -32,6 +35,9 @@ importers:
      lodash-es:
        specifier: ^4.17.21
        version: 4.17.21
      openai:
        specifier: ^4.91.1
        version: 4.91.1(ws@8.18.1)(zod@3.24.2)
    devDependencies:
      '@kevisual/types':
        specifier: ^0.0.6
@@ -69,18 +75,30 @@ importers:
      '@types/node':
        specifier: ^22.14.0
        version: 22.14.0
      '@vitejs/plugin-basic-ssl':
        specifier: ^2.0.0
        version: 2.0.0(vite@6.2.5(@types/node@22.14.0)(tsx@4.19.3))
      concurrently:
        specifier: ^9.1.2
        version: 9.1.2
      cross-env:
        specifier: ^7.0.3
        version: 7.0.3
      dotenv:
        specifier: ^16.4.7
        version: 16.4.7
      ioredis:
        specifier: ^5.6.0
        version: 5.6.0
      jsrepo:
        specifier: ^1.45.3
        version: 1.45.3(typescript@5.8.2)(ws@8.18.1)(zod@3.24.2)
      nodemon:
        specifier: ^3.1.9
        version: 3.1.9
      pino:
        specifier: ^9.6.0
        version: 9.6.0
      pm2:
        specifier: ^6.0.5
        version: 6.0.5
@@ -105,12 +123,18 @@ importers:
      tape:
        specifier: ^5.9.0
        version: 5.9.0
      tiktoken:
        specifier: ^1.0.20
        version: 1.0.20
      tsx:
        specifier: ^4.19.3
        version: 4.19.3
      typescript:
        specifier: ^5.8.2
        version: 5.8.2
      vite:
        specifier: ^6.2.5
        version: 6.2.5(@types/node@22.14.0)(tsx@4.19.3)

packages:

@@ -843,6 +867,12 @@ packages:
  '@types/validator@13.12.3':
    resolution: {integrity: sha512-2ipwZ2NydGQJImne+FhNdhgRM37e9lCev99KnqkbFHd94Xn/mErARWI1RSLem1QA19ch5kOhzIZd7e8CA2FI8g==}

  '@vitejs/plugin-basic-ssl@2.0.0':
    resolution: {integrity: sha512-gc9Tjg8bUxBVSTzeWT3Njc0Cl3PakHFKdNfABnZWiUgbxqmHDEn7uECv3fHVylxoYgNzAcmU7ZrILz+BwSo3sA==}
    engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0}
    peerDependencies:
      vite: ^6.0.0

  '@vue/compiler-core@3.5.13':
    resolution: {integrity: sha512-oOdAkwqUfW1WqpwSYJce06wvt6HljgY3fGeM9NcVA1HaYOij3mZG9Rkysn0OHuyUAGMbEbARIpsG+LPVlBJ5/Q==}

@@ -980,6 +1010,10 @@ packages:
  asynckit@0.4.0:
    resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}

  atomic-sleep@1.0.0:
    resolution: {integrity: sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==}
    engines: {node: '>=8.0.0'}

  atomically@2.0.3:
    resolution: {integrity: sha512-kU6FmrwZ3Lx7/7y3hPS5QnbJfaohcIul5fGqf7ok+4KklIEk9tJ0C2IQPdacSbVUWv6zVHXEBWoWd6NrVMT7Cw==}

@@ -1156,6 +1190,9 @@ packages:
    resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==}
    engines: {node: '>= 8'}

  crypto-js@4.2.0:
    resolution: {integrity: sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q==}

  css-dependency@0.0.3:
    resolution: {integrity: sha512-jLQuve6jhpjkH3+k2Y8jK3j27Hm3rnIsRW/8oOf9oxFOBI5iu6sndwSv6lj5dNfO9JVP6cNb8Xs+VXhndgtLfQ==}

@@ -1427,6 +1464,10 @@ packages:
  fast-json-patch@3.1.1:
    resolution: {integrity: sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==}

  fast-redact@3.5.0:
    resolution: {integrity: sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A==}
    engines: {node: '>=6'}

  fast-uri@3.0.6:
    resolution: {integrity: sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw==}

@@ -2038,6 +2079,10 @@ packages:
  ollama@0.5.14:
    resolution: {integrity: sha512-pvOuEYa2WkkAumxzJP0RdEYHkbZ64AYyyUszXVX7ruLvk5L+EiO2G71da2GqEQ4IAk4j6eLoUbGk5arzFT1wJA==}

  on-exit-leak-free@2.1.2:
    resolution: {integrity: sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA==}
    engines: {node: '>=14.0.0'}

  once@1.4.0:
    resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}

@@ -2168,6 +2213,16 @@ packages:
    resolution: {integrity: sha512-g0VU+y08pKw5M8EZ2rIGiEBaB8wrQMjYGFfW2QVIfyT8V+fq8YFLkvlz4bz5ljvFDJYNFCWT3PWqcRr2FKO81w==}
    engines: {node: '>=10'}

  pino-abstract-transport@2.0.0:
    resolution: {integrity: sha512-F63x5tizV6WCh4R6RHyi2Ml+M70DNRXt/+HANowMflpgGFMAym/VKm6G7ZOQRjqN7XbGxK1Lg9t6ZrtzOaivMw==}

  pino-std-serializers@7.0.0:
    resolution: {integrity: sha512-e906FRY0+tV27iq4juKzSYPbUj2do2X2JX4EzSca1631EB2QJQUqGbDuERal7LCtOpxl6x3+nvo9NPZcmjkiFA==}

  pino@9.6.0:
    resolution: {integrity: sha512-i85pKRCt4qMjZ1+L7sy2Ag4t1atFcdbEt76+7iRJn1g2BvsnRMGu9p8pivl9fs63M2kF/A0OacFZhTub+m/qMg==}
    hasBin: true

  pm2-axon-rpc@0.7.1:
    resolution: {integrity: sha512-FbLvW60w+vEyvMjP/xom2UPhUN/2bVpdtLfKJeYM3gwzYhoTEEChCOICfFzxkxuoEleOlnpjie+n1nue91bDQw==}
    engines: {node: '>=5'}
@@ -2230,6 +2285,9 @@ packages:
    resolution: {integrity: sha512-4yf0QO/sllf/1zbZWYnvWw3NxCQwLXKzIj0G849LSufP15BXKM0rbD2Z3wVnkMfjdn/CB0Dpp444gYAACdsplg==}
    engines: {node: '>=18'}

  process-warning@4.0.1:
    resolution: {integrity: sha512-3c2LzQ3rY9d0hc1emcsHhfT9Jwz0cChib/QN89oME2R451w5fy3f0afAhERFZAwrbDU43wk12d0ORBpDVME50Q==}

  promptly@2.2.0:
    resolution: {integrity: sha512-aC9j+BZsRSSzEsXBNBwDnAxujdx19HycZoKgRgzWnS8eOHg1asuf9heuLprfbe739zY3IdUQx+Egv6Jn135WHA==}

@@ -2246,6 +2304,9 @@ packages:
  queue-microtask@1.2.3:
    resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}

  quick-format-unescaped@4.0.4:
    resolution: {integrity: sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg==}

  read@1.0.7:
    resolution: {integrity: sha512-rSOKNYUmaxy0om1BNjMN4ezNT6VKK+2xF4GBhc81mkH7L60i6dp8qPYrkndNLT3QPphoII3maL9PVC9XmhHwVQ==}
    engines: {node: '>=0.8'}
@@ -2254,6 +2315,10 @@ packages:
    resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
    engines: {node: '>=8.10.0'}

  real-require@0.2.0:
    resolution: {integrity: sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==}
    engines: {node: '>= 12.13.0'}

  redis-errors@1.2.0:
    resolution: {integrity: sha512-1qny3OExCf0UvUV/5wpYKf2YwPcOqXzkwKKSmKHiE6ZMQs5heeE/c8eXK+PNllPvmjgAbfnsbpkGZWy8cBpn9w==}
    engines: {node: '>=4'}
@@ -2360,6 +2425,10 @@ packages:
    resolution: {integrity: sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==}
    engines: {node: '>= 0.4'}

  safe-stable-stringify@2.5.0:
    resolution: {integrity: sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==}
    engines: {node: '>=10'}

  safer-buffer@2.1.2:
    resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==}

@@ -2501,6 +2570,9 @@ packages:
    resolution: {integrity: sha512-D3YaD0aRxR3mEcqnidIs7ReYJFVzWdd6fXJYUM8ixcQcJRGTka/b3saV0KflYhyVJXKhb947GndU35SxYNResQ==}
    engines: {node: '>= 10.0.0', npm: '>= 3.0.0'}

  sonic-boom@4.2.0:
    resolution: {integrity: sha512-INb7TM37/mAcsGmc9hyyI6+QR3rR1zVRu36B0NeGXKnOOLiZOfER5SA+N7X7k3yUYRzLWafduTDvJAfDswwEww==}

  source-map-js@1.2.1:
    resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==}
    engines: {node: '>=0.10.0'}
@@ -2602,6 +2674,12 @@ packages:
    resolution: {integrity: sha512-czbGgxSVwRlbB3Ly/aqQrNwrDAzKHDW/kVXegp4hSFmR2c8qqm3hCgZbUy1+3QAQFGhPDG7J56UsV1uNilBFCA==}
    hasBin: true

  thread-stream@3.1.0:
    resolution: {integrity: sha512-OqyPZ9u96VohAyMfJykzmivOrY2wfMSf3C5TtFJVgN+Hm6aj+voFhlK+kZEIv2FBh1X6Xp3DlnCOfEQ3B2J86A==}

  tiktoken@1.0.20:
    resolution: {integrity: sha512-zVIpXp84kth/Ni2me1uYlJgl2RZ2EjxwDaWLeDY/s6fZiyO9n1QoTOM5P7ZSYfToPvAvwYNMbg5LETVYVKyzfQ==}

  to-regex-range@5.0.1:
    resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
    engines: {node: '>=8.0'}
@@ -2726,6 +2804,46 @@ packages:
    resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==}
    engines: {node: '>= 0.8'}

  vite@6.2.5:
    resolution: {integrity: sha512-j023J/hCAa4pRIUH6J9HemwYfjB5llR2Ps0CWeikOtdR8+pAURAk0DoJC5/mm9kd+UgdnIy7d6HE4EAvlYhPhA==}
    engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0}
    hasBin: true
    peerDependencies:
      '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0
      jiti: '>=1.21.0'
      less: '*'
      lightningcss: ^1.21.0
      sass: '*'
      sass-embedded: '*'
      stylus: '*'
      sugarss: '*'
      terser: ^5.16.0
      tsx: ^4.8.1
      yaml: ^2.4.2
    peerDependenciesMeta:
      '@types/node':
        optional: true
      jiti:
        optional: true
      less:
        optional: true
      lightningcss:
        optional: true
      sass:
        optional: true
      sass-embedded:
        optional: true
      stylus:
        optional: true
      sugarss:
        optional: true
      terser:
        optional: true
      tsx:
        optional: true
      yaml:
        optional: true

  vizion@2.2.1:
    resolution: {integrity: sha512-sfAcO2yeSU0CSPFI/DmZp3FsFE9T+8913nv1xWBOyzODv13fwkn6Vl7HqxGpkr9F608M+8SuFId3s+BlZqfXww==}
    engines: {node: '>=4.0'}
@@ -3597,6 +3715,10 @@ snapshots:

  '@types/validator@13.12.3': {}

  '@vitejs/plugin-basic-ssl@2.0.0(vite@6.2.5(@types/node@22.14.0)(tsx@4.19.3))':
    dependencies:
      vite: 6.2.5(@types/node@22.14.0)(tsx@4.19.3)

  '@vue/compiler-core@3.5.13':
    dependencies:
      '@babel/parser': 7.27.0
@@ -3751,6 +3873,8 @@ snapshots:

  asynckit@0.4.0: {}

  atomic-sleep@1.0.0: {}

  atomically@2.0.3:
    dependencies:
      stubborn-fs: 1.2.5
@@ -3930,6 +4054,8 @@ snapshots:
      shebang-command: 2.0.0
      which: 2.0.2

  crypto-js@4.2.0: {}

  css-dependency@0.0.3:
    dependencies:
      ansi-regex: 6.1.0
@@ -4286,6 +4412,8 @@ snapshots:

  fast-json-patch@3.1.1: {}

  fast-redact@3.5.0: {}

  fast-uri@3.0.6: {}

  fastq@1.19.1:
@@ -4944,6 +5072,8 @@ snapshots:
    dependencies:
      whatwg-fetch: 3.6.20

  on-exit-leak-free@2.1.2: {}

  once@1.4.0:
    dependencies:
      wrappy: 1.0.2
@@ -5083,6 +5213,26 @@ snapshots:
    dependencies:
      safe-buffer: 5.2.1

  pino-abstract-transport@2.0.0:
    dependencies:
      split2: 4.2.0

  pino-std-serializers@7.0.0: {}

  pino@9.6.0:
    dependencies:
      atomic-sleep: 1.0.0
      fast-redact: 3.5.0
      on-exit-leak-free: 2.1.2
      pino-abstract-transport: 2.0.0
      pino-std-serializers: 7.0.0
      process-warning: 4.0.1
      quick-format-unescaped: 4.0.4
      real-require: 0.2.0
      safe-stable-stringify: 2.5.0
      sonic-boom: 4.2.0
      thread-stream: 3.1.0

  pm2-axon-rpc@0.7.1:
    dependencies:
      debug: 4.4.0(supports-color@5.5.0)
@@ -5185,6 +5335,8 @@ snapshots:
    dependencies:
      parse-ms: 4.0.0

  process-warning@4.0.1: {}

  promptly@2.2.0:
    dependencies:
      read: 1.0.7
@@ -5208,6 +5360,8 @@ snapshots:

  queue-microtask@1.2.3: {}

  quick-format-unescaped@4.0.4: {}

  read@1.0.7:
    dependencies:
      mute-stream: 0.0.8
@@ -5216,6 +5370,8 @@ snapshots:
    dependencies:
      picomatch: 2.3.1

  real-require@0.2.0: {}

  redis-errors@1.2.0: {}

  redis-parser@3.0.0:
@@ -5371,6 +5527,8 @@ snapshots:
      es-errors: 1.3.0
      is-regex: 1.2.1

  safe-stable-stringify@2.5.0: {}

  safer-buffer@2.1.2: {}

  sax@1.4.1: {}
@@ -5528,6 +5686,10 @@ snapshots:
      ip-address: 9.0.5
      smart-buffer: 4.2.0

  sonic-boom@4.2.0:
    dependencies:
      atomic-sleep: 1.0.0

  source-map-js@1.2.1: {}

  source-map-support@0.5.21:
@@ -5664,6 +5826,12 @@ snapshots:
      resolve: 2.0.0-next.5
      string.prototype.trim: 1.2.10

  thread-stream@3.1.0:
    dependencies:
      real-require: 0.2.0

  tiktoken@1.0.20: {}

  to-regex-range@5.0.1:
    dependencies:
      is-number: 7.0.0
@@ -5773,6 +5941,16 @@ snapshots:

  vary@1.1.2: {}

  vite@6.2.5(@types/node@22.14.0)(tsx@4.19.3):
    dependencies:
      esbuild: 0.25.2
      postcss: 8.5.3
      rollup: 4.39.0
    optionalDependencies:
      '@types/node': 22.14.0
      fsevents: 2.3.3
      tsx: 4.19.3

  vizion@2.2.1:
    dependencies:
      async: 2.6.4

(modified file, name not shown in the captured view)

@@ -1,5 +1,5 @@
import { app } from './app.ts';
import { useConfig } from '@kevisual/use-config';
import { useConfig } from '@kevisual/use-config/env';

app
  .route({
@@ -13,4 +13,4 @@ app

const config = useConfig();

console.log('run demo: http://localhost:' + config.port + '/api/router?path=demo&key=demo');
console.log('run demo: http://localhost:' + config.PORT + '/api/router?path=demo&key=demo');

src/dev.ts | 15 lines

@@ -1,8 +1,15 @@
import { useConfig } from '@kevisual/use-config';
import { app } from './index.ts';
import { useConfig } from '@kevisual/use-config/env';
import { useContextKey } from '@kevisual/use-config/context';
import { Redis } from 'ioredis';
export const redis = useContextKey('redis', () => {
  return new Redis();
});
import { app } from './index.ts'; // development environment

const config = useConfig();

app.listen(config.port, () => {
  console.log(`server is running at http://localhost:${config.port}`);
const port = config.PORT || 6666;

app.listen(port, () => {
  console.log(`server is running at http://localhost:${port}`);
});

(modified file, name not shown in the captured view)

@@ -1,10 +1,10 @@
// single-app instance startup

import { useConfig } from '@kevisual/use-config';
import { useConfig } from '@kevisual/use-config/env';
import { app } from './index.ts';

const config = useConfig();

app.listen(config.port, () => {
app.listen(config.PORT, () => {
  console.log(`server is running at http://localhost:${config.port}`);
});

src/modules/chat-config.ts | 7 lines (new file)

@@ -0,0 +1,7 @@
import fs from 'fs';
import path from 'path';

export const getChatConfig = () => {
  const config = fs.readFileSync(path.resolve(process.cwd(), 'config.json'), 'utf-8');
  return JSON.parse(config);
};

src/modules/config.ts | 21 lines (new file)

@@ -0,0 +1,21 @@
import { useConfig } from '@kevisual/use-config/env';
export const envConfig = useConfig() as any;
type ConfigType = {
  /**
   * Host, e.g. http://localhost:3000
   */
  host: string;
  /**
   * Path, e.g. /api
   */
  path: string;
  /**
   * Port
   */
  port: number;
};
export const config: ConfigType = {
  host: envConfig.API_HOST || 'http://localhost:4005',
  path: envConfig.API_PATH || '/api/router',
  port: envConfig.PORT || 6666,
};
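
A minimal sketch of how this config object would be consumed; the import path is relative to the repo root and the consumer itself is hypothetical, not part of this commit:

  // hypothetical consumer of src/modules/config.ts
  import { config } from './src/modules/config.ts';

  // e.g. http://localhost:4005/api/router when no env overrides are set
  const routerURL = new URL(config.path, config.host);
  console.log(routerURL.toString(), config.port);
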
src/modules/db.ts | 3 lines (new file)

@@ -0,0 +1,3 @@
import { useContextKey } from '@kevisual/use-config/context';

export const redis = useContextKey('redis');
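
Note: src/dev.ts above registers the Redis client under the same key via useContextKey('redis', () => new Redis()); this module only looks it up. A small sketch of the assumed pattern (the cache-by-key behaviour of useContextKey is inferred from these two call sites, not documented here):

  import { useContextKey } from '@kevisual/use-config/context';
  import { Redis } from 'ioredis';

  // the first call with a factory creates and stores the instance ...
  const created = useContextKey('redis', () => new Redis());
  // ... later calls with the same key are assumed to return that same instance
  const lookedUp = useContextKey('redis');
  console.log(created === lookedUp);
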
@@ -1,9 +0,0 @@
 | 
			
		||||
import { Mark, markModelInit } from '@kevisual/mark';
 | 
			
		||||
 | 
			
		||||
export { Mark, markModelInit };
 | 
			
		||||
 | 
			
		||||
export const init = () => {
 | 
			
		||||
  markModelInit({
 | 
			
		||||
    tableName: '',
 | 
			
		||||
  });
 | 
			
		||||
};
 | 
			
		||||
@@ -1 +0,0 @@
 | 
			
		||||
export { sequelize, redis } from '@kevisual/code-center-module';
 | 
			
		||||

(modified file, name not shown in the captured view)

@@ -1,6 +1,5 @@
import { sequelize, User, UserInit, Org, OrgInit } from '@kevisual/code-center-module';

export { sequelize, User, UserInit, Org, OrgInit };
import { User, UserInit, Org, OrgInit } from '@kevisual/code-center-module/models';
export { User, Org };

export const init = () => {
  UserInit();

src/page/main.ts | 37 lines (new file)

@@ -0,0 +1,37 @@
import { BaseChat } from '../provider/index.ts';
import { Knowledge } from '../provider/knowledge/knowledge-base.ts';
const chat = new BaseChat({
  baseURL: 'https://ollama.xiongxiao.me/v1',
  apiKey: '',
  model: 'qwq:latest',
  stream: false,
  isBrowser: true,
});

// chat.chat([{ role: 'user', content: 'Hello, world!' }]);

const main = async () => {
  const res = await chat.test();
};

// main();
// @ts-ignore
window.main = main;

const knowledge = new Knowledge({
  embeddingModel: 'bge-m3:latest',
  baseURL: 'https://ollama.xiongxiao.me/v1',
  model: 'qwq:latest',
  apiKey: '',
  isBrowser: true,
});

const createEmbedding = async () => {
  const res = await knowledge.generateEmbeddingCore('Hello, world! this is a test');
  console.log('res', res.data[0].embedding.length);
  console.log(res);
};

// createEmbedding();
// @ts-ignore
window.createEmbedding = createEmbedding;

src/provider/chat-adapter/custom.ts | 12 lines (new file)

@@ -0,0 +1,12 @@
import { BaseChat, BaseChatOptions } from '../core/chat.ts';

export type OllamaOptions = BaseChatOptions;

/**
 * Custom model
 */
export class Custom extends BaseChat {
  constructor(options: OllamaOptions) {
    super(options);
  }
}

src/provider/chat-adapter/deepseek.ts | 8 lines (new file)

@@ -0,0 +1,8 @@
import { BaseChat, BaseChatOptions } from '../core/chat.ts';

export type DeepSeekOptions = Partial<BaseChatOptions>;
export class DeepSeek extends BaseChat {
  constructor(options: DeepSeekOptions) {
    super({ baseURL: 'https://api.deepseek.com/v1/', ...options } as any);
  }
}

src/provider/chat-adapter/model-scope.ts | 9 lines (new file)

@@ -0,0 +1,9 @@
// https://api-inference.modelscope.cn/v1/
import { BaseChat, BaseChatOptions } from '../core/chat.ts';

export type ModelScopeOptions = Partial<BaseChatOptions>;
export class ModelScope extends BaseChat {
  constructor(options: ModelScopeOptions) {
    super({ baseURL: 'https://api-inference.modelscope.cn/v1/', ...options } as any);
  }
}

src/provider/chat-adapter/ollama.ts | 45 lines (new file)

@@ -0,0 +1,45 @@
import { BaseChat, BaseChatOptions } from '../core/index.ts';
import type { ChatMessage, ChatMessageOptions } from '../core/index.ts';

export type OllamaOptions = Partial<BaseChatOptions>;

type OllamaModel = {
  name: string;
  model: string;
  modified_at: string;

  size: number;
  digest: string;
  details: {
    parent_model: string;
    format: string; // example: gguf
    family: string; // example qwen
    families: string[];
    parameter_size: string;
    quantization_level: string; // example: Q4_K_M Q4_0
  };
};
export class Ollama extends BaseChat {
  constructor(options: OllamaOptions) {
    super({ baseURL: 'http://localhost:11434/v1', ...(options as BaseChatOptions) });
  }
  async chat(messages: ChatMessage[], options?: ChatMessageOptions) {
    const res = await super.chat(messages, options);
    console.log('thunk', this.getChatUsage());
    return res;
  }
  /**
   * Fetch the model list
   * @returns
   */
  async listModels(): Promise<{ models: OllamaModel[] }> {
    const _url = new URL(this.baseURL);
    const tagsURL = new URL('/api/tags', _url);
    return this.openai.get(tagsURL.toString());
  }
  async listRunModels(): Promise<{ models: OllamaModel[] }> {
    const _url = new URL(this.baseURL);
    const tagsURL = new URL('/api/ps', _url);
    return this.openai.get(tagsURL.toString());
  }
}
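
A minimal usage sketch for the Ollama adapter above, assuming a local Ollama server at the default baseURL; the model name is a placeholder and the import path is relative to the repo root:

  import { Ollama } from './src/provider/chat-adapter/ollama.ts';

  const ollama = new Ollama({ model: 'qwq:latest', apiKey: '' });
  const { models } = await ollama.listModels(); // GET <baseURL origin>/api/tags
  console.log(models.map((m) => `${m.name} (${m.details.parameter_size})`));
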
src/provider/chat-adapter/siliconflow.ts | 37 lines (new file)

@@ -0,0 +1,37 @@
import { BaseChat, BaseChatOptions } from '../core/chat.ts';
import { OpenAI } from 'openai';

type SiliconFlowOptions = Partial<BaseChatOptions>;

type SiliconFlowUsageData = {
  id: string;
  name: string;
  image: string;
  email: string;
  isAdmin: boolean;
  balance: string;
  status: 'normal' | 'suspended' | 'expired' | string; // status
  introduce: string;
  role: string;
  chargeBalance: string;
  totalBalance: string;
  category: string;
};
type SiliconFlowUsageResponse = {
  code: number;
  message: string;
  status: boolean;
  data: SiliconFlowUsageData;
};
export class SiliconFlow extends BaseChat {
  constructor(options: SiliconFlowOptions) {
    super({ baseURL: 'https://api.siliconflow.com/v1', ...(options as BaseChatOptions) });
  }
  async getUsageInfo(): Promise<SiliconFlowUsageResponse> {
    return this.openai.get('/user/info');
  }
  async chat(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], options?: Partial<OpenAI.Chat.Completions.ChatCompletionCreateParams>) {
    const res = await super.chat(messages, options);
    return res;
  }
}
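
A short sketch of the account-info call above; reading the key from an environment variable and the model name are both assumptions, not part of the commit:

  import { SiliconFlow } from './src/provider/chat-adapter/siliconflow.ts';

  const sf = new SiliconFlow({ apiKey: process.env.SILICONFLOW_API_KEY ?? '', model: 'placeholder-model' });
  const info = await sf.getUsageInfo(); // GET /user/info relative to the baseURL
  console.log(info.data.balance, info.data.status);
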
src/provider/chat-adapter/volces.ts | 8 lines (new file)

@@ -0,0 +1,8 @@
import { BaseChat, BaseChatOptions } from '../core/chat.ts';

export type VolcesOptions = Partial<BaseChatOptions>;
export class Volces extends BaseChat {
  constructor(options: VolcesOptions) {
    super({ baseURL: 'https://ark.cn-beijing.volces.com/api/v3/', ...options } as any);
  }
}

src/provider/core/chat.ts | 110 lines (new file)

@@ -0,0 +1,110 @@
import { OpenAI } from 'openai';
import type { BaseChatInterface, ChatMessageComplete, ChatMessage, ChatMessageOptions, BaseChatUsageInterface } from './type.ts';

export type BaseChatOptions<T = Record<string, any>> = {
  /**
   * Default baseURL
   */
  baseURL: string;
  /**
   * Default model
   */
  model: string;
  /**
   * Default apiKey
   */
  apiKey: string;
  /**
   * Whether the client runs in the browser
   */
  isBrowser?: boolean;
  /**
   * Whether to stream output; default false
   */
  stream?: boolean;
} & T;

export class BaseChat implements BaseChatInterface, BaseChatUsageInterface {
  /**
   * Default baseURL
   */
  baseURL: string;
  /**
   * Default model
   */
  model: string;
  /**
   * Default apiKey
   */
  apiKey: string;
  /**
   * Whether the client runs in the browser
   */
  isBrowser: boolean;
  /**
   * OpenAI instance
   */
  openai: OpenAI;

  prompt_tokens: number;
  total_tokens: number;
  completion_tokens: number;

  constructor(options: BaseChatOptions) {
    this.baseURL = options.baseURL;
    this.model = options.model;
    this.apiKey = options.apiKey;
    this.isBrowser = options.isBrowser ?? false;
    this.openai = new OpenAI({
      apiKey: this.apiKey,
      baseURL: this.baseURL,
      dangerouslyAllowBrowser: this.isBrowser,
    });
  }
  /**
   * Chat
   */
  async chat(messages: ChatMessage[], options?: ChatMessageOptions): Promise<ChatMessageComplete> {
    const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
      model: this.model,
      messages,
      ...options,
      stream: false,
    };
    const res = (await this.openai.chat.completions.create(createParams)) as ChatMessageComplete;
    this.prompt_tokens = res.usage?.prompt_tokens ?? 0;
    this.total_tokens = res.usage?.total_tokens ?? 0;
    this.completion_tokens = res.usage?.completion_tokens ?? 0;
    return res;
  }
  async chatStream(messages: ChatMessage[], options?: ChatMessageOptions) {
    const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
      model: this.model,
      messages,
      ...options,
      stream: true,
    };
    if (createParams.response_format) {
      throw new Error('response_format is not supported in stream mode');
    }
    return this.openai.chat.completions.create(createParams) as any;
  }

  /**
   * Test
   */
  test() {
    return this.chat([{ role: 'user', content: 'Hello, world!' }]);
  }
  /**
   * Get chat usage
   * @returns
   */
  getChatUsage() {
    return {
      prompt_tokens: this.prompt_tokens,
      total_tokens: this.total_tokens,
      completion_tokens: this.completion_tokens,
    };
  }
}
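
A minimal usage sketch for BaseChat; the endpoint, key, and model are placeholders and the import path is relative to the repo root:

  import { BaseChat } from './src/provider/core/chat.ts';

  const chat = new BaseChat({
    baseURL: 'http://localhost:11434/v1', // any OpenAI-compatible endpoint
    apiKey: '',
    model: 'qwq:latest',
  });
  const res = await chat.chat([{ role: 'user', content: 'Hello, world!' }]);
  console.log(res.choices[0]?.message?.content);
  console.log(chat.getChatUsage()); // { prompt_tokens, total_tokens, completion_tokens }
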
src/provider/core/index.ts | 2 lines (new file)

@@ -0,0 +1,2 @@
export * from './chat.ts';
export * from './type.ts';

src/provider/core/text-regex.ts | 105 lines (new file)

@@ -0,0 +1,105 @@
// Updated: Aug. 20, 2024
// Live demo: https://jina.ai/tokenizer
// LICENSE: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
// COPYRIGHT: Jina AI

// Define variables for magic numbers
const MAX_HEADING_LENGTH = 7;
const MAX_HEADING_CONTENT_LENGTH = 200;
const MAX_HEADING_UNDERLINE_LENGTH = 200;
const MAX_HTML_HEADING_ATTRIBUTES_LENGTH = 100;
const MAX_LIST_ITEM_LENGTH = 200;
const MAX_NESTED_LIST_ITEMS = 6;
const MAX_LIST_INDENT_SPACES = 7;
const MAX_BLOCKQUOTE_LINE_LENGTH = 200;
const MAX_BLOCKQUOTE_LINES = 15;
const MAX_CODE_BLOCK_LENGTH = 1500;
const MAX_CODE_LANGUAGE_LENGTH = 20;
const MAX_INDENTED_CODE_LINES = 20;
const MAX_TABLE_CELL_LENGTH = 200;
const MAX_TABLE_ROWS = 20;
const MAX_HTML_TABLE_LENGTH = 2000;
const MIN_HORIZONTAL_RULE_LENGTH = 3;
const MAX_SENTENCE_LENGTH = 400;
const MAX_QUOTED_TEXT_LENGTH = 300;
const MAX_PARENTHETICAL_CONTENT_LENGTH = 200;
const MAX_NESTED_PARENTHESES = 5;
const MAX_MATH_INLINE_LENGTH = 100;
const MAX_MATH_BLOCK_LENGTH = 500;
const MAX_PARAGRAPH_LENGTH = 1000;
const MAX_STANDALONE_LINE_LENGTH = 800;
const MAX_HTML_TAG_ATTRIBUTES_LENGTH = 100;
const MAX_HTML_TAG_CONTENT_LENGTH = 1000;
const LOOKAHEAD_RANGE = 100;  // Number of characters to look ahead for a sentence boundary

const AVOID_AT_START = `[\\s\\]})>,']`;
const PUNCTUATION = `[.!?…]|\\.{3}|[\\u2026\\u2047-\\u2049]|[\\p{Emoji_Presentation}\\p{Extended_Pictographic}]`;
const QUOTE_END = `(?:'(?=\`)|''(?=\`\`))`;
const SENTENCE_END = `(?:${PUNCTUATION}(?<!${AVOID_AT_START}(?=${PUNCTUATION}))|${QUOTE_END})(?=\\S|$)`;
const SENTENCE_BOUNDARY = `(?:${SENTENCE_END}|(?=[\\r\\n]|$))`;
const LOOKAHEAD_PATTERN = `(?:(?!${SENTENCE_END}).){1,${LOOKAHEAD_RANGE}}${SENTENCE_END}`;
const NOT_PUNCTUATION_SPACE = `(?!${PUNCTUATION}\\s)`;
const SENTENCE_PATTERN = `${NOT_PUNCTUATION_SPACE}(?:[^\\r\\n]{1,{MAX_LENGTH}}${SENTENCE_BOUNDARY}|[^\\r\\n]{1,{MAX_LENGTH}}(?=${PUNCTUATION}|${QUOTE_END})(?:${LOOKAHEAD_PATTERN})?)${AVOID_AT_START}*`;


export const textSplitter = new RegExp(
    "(" +
    // 1. Headings (Setext-style, Markdown, and HTML-style, with length constraints)
    `(?:^(?:[#*=-]{1,${MAX_HEADING_LENGTH}}|\\w[^\\r\\n]{0,${MAX_HEADING_CONTENT_LENGTH}}\\r?\\n[-=]{2,${MAX_HEADING_UNDERLINE_LENGTH}}|<h[1-6][^>]{0,${MAX_HTML_HEADING_ATTRIBUTES_LENGTH}}>)[^\\r\\n]{1,${MAX_HEADING_CONTENT_LENGTH}}(?:</h[1-6]>)?(?:\\r?\\n|$))` +
    "|" +
    // New pattern for citations
    `(?:\\[[0-9]+\\][^\\r\\n]{1,${MAX_STANDALONE_LINE_LENGTH}})` +
    "|" +
    // 2. List items (bulleted, numbered, lettered, or task lists, including nested, up to three levels, with length constraints)
    `(?:(?:^|\\r?\\n)[ \\t]{0,3}(?:[-*+•]|\\d{1,3}\\.\\w\\.|\\[[ xX]\\])[ \\t]+${SENTENCE_PATTERN.replace(/{MAX_LENGTH}/g, String(MAX_LIST_ITEM_LENGTH))}` +
    `(?:(?:\\r?\\n[ \\t]{2,5}(?:[-*+•]|\\d{1,3}\\.\\w\\.|\\[[ xX]\\])[ \\t]+${SENTENCE_PATTERN.replace(/{MAX_LENGTH}/g, String(MAX_LIST_ITEM_LENGTH))}){0,${MAX_NESTED_LIST_ITEMS}}` +
    `(?:\\r?\\n[ \\t]{4,${MAX_LIST_INDENT_SPACES}}(?:[-*+•]|\\d{1,3}\\.\\w\\.|\\[[ xX]\\])[ \\t]+${SENTENCE_PATTERN.replace(/{MAX_LENGTH}/g, String(MAX_LIST_ITEM_LENGTH))}){0,${MAX_NESTED_LIST_ITEMS}})?)` +
    "|" +
    // 3. Block quotes (including nested quotes and citations, up to three levels, with length constraints)
    `(?:(?:^>(?:>|\\s{2,}){0,2}${SENTENCE_PATTERN.replace(/{MAX_LENGTH}/g, String(MAX_BLOCKQUOTE_LINE_LENGTH))}\\r?\\n?){1,${MAX_BLOCKQUOTE_LINES}})` +
    "|" +
    // 4. Code blocks (fenced, indented, or HTML pre/code tags, with length constraints)
    `(?:(?:^|\\r?\\n)(?:\`\`\`|~~~)(?:\\w{0,${MAX_CODE_LANGUAGE_LENGTH}})?\\r?\\n[\\s\\S]{0,${MAX_CODE_BLOCK_LENGTH}}?(?:\`\`\`|~~~)\\r?\\n?` +
    `|(?:(?:^|\\r?\\n)(?: {4}|\\t)[^\\r\\n]{0,${MAX_LIST_ITEM_LENGTH}}(?:\\r?\\n(?: {4}|\\t)[^\\r\\n]{0,${MAX_LIST_ITEM_LENGTH}}){0,${MAX_INDENTED_CODE_LINES}}\\r?\\n?)` +
    `|(?:<pre>(?:<code>)?[\\s\\S]{0,${MAX_CODE_BLOCK_LENGTH}}?(?:</code>)?</pre>))` +
    "|" +
    // 5. Tables (Markdown, grid tables, and HTML tables, with length constraints)
    `(?:(?:^|\\r?\\n)(?:\\|[^\\r\\n]{0,${MAX_TABLE_CELL_LENGTH}}\\|(?:\\r?\\n\\|[-:]{1,${MAX_TABLE_CELL_LENGTH}}\\|){0,1}(?:\\r?\\n\\|[^\\r\\n]{0,${MAX_TABLE_CELL_LENGTH}}\\|){0,${MAX_TABLE_ROWS}}` +
    `|<table>[\\s\\S]{0,${MAX_HTML_TABLE_LENGTH}}?</table>))` +
    "|" +
    // 6. Horizontal rules (Markdown and HTML hr tag)
    `(?:^(?:[-*_]){${MIN_HORIZONTAL_RULE_LENGTH},}\\s*$|<hr\\s*/?>)` +
    "|" +
    // 10. Standalone lines or phrases (including single-line blocks and HTML elements, with length constraints)
    `(?!${AVOID_AT_START})(?:^(?:<[a-zA-Z][^>]{0,${MAX_HTML_TAG_ATTRIBUTES_LENGTH}}>)?${SENTENCE_PATTERN.replace(/{MAX_LENGTH}/g, String(MAX_STANDALONE_LINE_LENGTH))}(?:</[a-zA-Z]+>)?(?:\\r?\\n|$))` +
    "|" +
    // 7. Sentences or phrases ending with punctuation (including ellipsis and Unicode punctuation)
    `(?!${AVOID_AT_START})${SENTENCE_PATTERN.replace(/{MAX_LENGTH}/g, String(MAX_SENTENCE_LENGTH))}` +
    "|" +
    // 8. Quoted text, parenthetical phrases, or bracketed content (with length constraints)
    "(?:" +
    `(?<!\\w)\"\"\"[^\"]{0,${MAX_QUOTED_TEXT_LENGTH}}\"\"\"(?!\\w)` +
    `|(?<!\\w)(?:['\"\`'"])[^\\r\\n]{0,${MAX_QUOTED_TEXT_LENGTH}}\\1(?!\\w)` +
    `|(?<!\\w)\`[^\\r\\n]{0,${MAX_QUOTED_TEXT_LENGTH}}'(?!\\w)` +
    `|(?<!\\w)\`\`[^\\r\\n]{0,${MAX_QUOTED_TEXT_LENGTH}}''(?!\\w)` +
    `|\\([^\\r\\n()]{0,${MAX_PARENTHETICAL_CONTENT_LENGTH}}(?:\\([^\\r\\n()]{0,${MAX_PARENTHETICAL_CONTENT_LENGTH}}\\)[^\\r\\n()]{0,${MAX_PARENTHETICAL_CONTENT_LENGTH}}){0,${MAX_NESTED_PARENTHESES}}\\)` +
    `|\\[[^\\r\\n\\[\\]]{0,${MAX_PARENTHETICAL_CONTENT_LENGTH}}(?:\\[[^\\r\\n\\[\\]]{0,${MAX_PARENTHETICAL_CONTENT_LENGTH}}\\][^\\r\\n\\[\\]]{0,${MAX_PARENTHETICAL_CONTENT_LENGTH}}){0,${MAX_NESTED_PARENTHESES}}\\]` +
    `|\\$[^\\r\\n$]{0,${MAX_MATH_INLINE_LENGTH}}\\$` +
    `|\`[^\`\\r\\n]{0,${MAX_MATH_INLINE_LENGTH}}\`` +
    ")" +
    "|" +
    // 9. Paragraphs (with length constraints)
    `(?!${AVOID_AT_START})(?:(?:^|\\r?\\n\\r?\\n)(?:<p>)?${SENTENCE_PATTERN.replace(/{MAX_LENGTH}/g, String(MAX_PARAGRAPH_LENGTH))}(?:</p>)?(?=\\r?\\n\\r?\\n|$))` +
    "|" +
    // 11. HTML-like tags and their content (including self-closing tags and attributes, with length constraints)
    `(?:<[a-zA-Z][^>]{0,${MAX_HTML_TAG_ATTRIBUTES_LENGTH}}(?:>[\\s\\S]{0,${MAX_HTML_TAG_CONTENT_LENGTH}}?</[a-zA-Z]+>|\\s*/>))` +
    "|" +
    // 12. LaTeX-style math expressions (inline and block, with length constraints)
    `(?:(?:\\$\\$[\\s\\S]{0,${MAX_MATH_BLOCK_LENGTH}}?\\$\\$)|(?:\\$[^\\$\\r\\n]{0,${MAX_MATH_INLINE_LENGTH}}\\$))` +
    "|" +
    // 14. Fallback for any remaining content (with length constraints)
    `(?!${AVOID_AT_START})${SENTENCE_PATTERN.replace(/{MAX_LENGTH}/g, String(MAX_STANDALONE_LINE_LENGTH))}` +
    ")",
    "gmu"
);
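
The g flag means the splitter is meant to be applied with String.prototype.match; a small sketch (the sample text is arbitrary and the import path is relative to the repo root):

  import { textSplitter } from './src/provider/core/text-regex.ts';

  const sample = '# Title\n\nFirst paragraph. Second sentence!\n\n- item one\n- item two\n';
  const chunks = sample.match(textSplitter) ?? [];
  console.log(chunks.length, chunks);
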
src/provider/core/type.ts | 28 lines (new file)

@@ -0,0 +1,28 @@
import OpenAI from 'openai';
import { APIPromise } from 'openai/core.mjs';
import { ChatCompletionChunk } from 'openai/resources.mjs';
import { Stream } from 'openai/streaming.mjs';

export type ChatMessage = OpenAI.Chat.Completions.ChatCompletionMessageParam;
export type ChatMessageOptions = Partial<OpenAI.Chat.Completions.ChatCompletionCreateParams>;
export type ChatMessageComplete = OpenAI.Chat.Completions.ChatCompletion;
export type ChatMessageStream = OpenAI.Chat.Completions.ChatCompletion;

export interface BaseChatInterface {
  chat(messages: ChatMessage[], options?: ChatMessageOptions): Promise<ChatMessageComplete>;
}

export interface BaseChatUsageInterface {
  /**
   * Prompt tokens
   */
  prompt_tokens: number;
  /**
   * Total tokens
   */
  total_tokens: number;
  /**
   * Completion tokens
   */
  completion_tokens: number;
}

src/provider/index.ts | 59 lines (new file)

@@ -0,0 +1,59 @@
export * from './core/index.ts';
import { BaseChat } from './core/chat.ts';

import { Ollama } from './chat-adapter/ollama.ts';
import { SiliconFlow } from './chat-adapter/siliconflow.ts';
import { Custom } from './chat-adapter/custom.ts';
import { Volces } from './chat-adapter/volces.ts';
import { DeepSeek } from './chat-adapter/deepseek.ts';
import { ModelScope } from './chat-adapter/model-scope.ts';
import { ChatMessage } from './core/type.ts';

export const OllamaProvider = Ollama;
export const SiliconFlowProvider = SiliconFlow;
export const CustomProvider = Custom;
export const VolcesProvider = Volces;
export const DeepSeekProvider = DeepSeek;
export const ModelScopeProvider = ModelScope;

export const ProviderMap = {
  Ollama: OllamaProvider,
  SiliconFlow: SiliconFlowProvider,
  Custom: CustomProvider,
  Volces: VolcesProvider,
  DeepSeek: DeepSeekProvider,
  ModelScope: ModelScopeProvider,
  BaseChat: BaseChat,
};

type ProviderManagerConfig = {
  provider: string;
  model: string;
  apiKey: string;
  baseURL?: string;
};
export class ProviderManager {
  provider: BaseChat;
  constructor(config: ProviderManagerConfig) {
    const { provider, model, apiKey, baseURL } = config;
    const Provider = ProviderMap[provider] as typeof BaseChat;
    if (!Provider) {
      throw new Error(`Provider ${provider} not found`);
    }
    this.provider = new Provider({
      model,
      apiKey,
      baseURL,
    });
  }
  static async createProvider(config: ProviderManagerConfig) {
    if (!config.baseURL) {
      delete config.baseURL;
    }
    const pm = new ProviderManager(config);
    return pm.provider;
  }
  async chat(messages: ChatMessage[]) {
    return this.provider.chat(messages);
  }
}
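
A minimal usage sketch for the ProviderManager added above. The provider key must match an entry in ProviderMap; the model name and environment variable below are illustrative, not taken from this commit:

import { ProviderManager } from '@/provider/index.ts';

// Create a concrete provider (SiliconFlow here) and send one chat turn.
const provider = await ProviderManager.createProvider({
  provider: 'SiliconFlow',                  // must be a key of ProviderMap
  model: 'Qwen/Qwen2-7B-Instruct',          // illustrative model name
  apiKey: process.env.SILICONFLOW_API_KEY,  // assumed to be set in .env
});
const res = await provider.chat([{ role: 'user', content: 'Hello' }]);
console.log(res.choices[0]?.message?.content);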

126  src/provider/knowledge/knowledge-base.ts  Normal file
@@ -0,0 +1,126 @@
import { BaseChat, BaseChatOptions } from '../core/chat.ts';
import { numTokensFromString } from '../utils/token.ts';

export type KnowledgeOptions<T = Record<string, string>> = BaseChatOptions<
  {
    embeddingModel: string;
    splitSize?: number; // chunk size, default 2000
    splitOverlap?: number; // chunk overlap, default 200
    batchSize?: number; // batch size, default 4 (4 * 2000 = 8000)
  } & T
>;
/**
 * Knowledge base construction
 * 1. Embedding generation
 * 2. Retriever
 * 3. Reranker
 */
export class KnowledgeBase extends BaseChat {
  embeddingModel: string;
  splitSize: number;
  splitOverlap: number;
  batchSize: number;
  constructor(options: KnowledgeOptions) {
    super(options);
    this.embeddingModel = options.embeddingModel;
    this.splitSize = options.splitSize || 2000;
    this.splitOverlap = options.splitOverlap || 200;
    this.prompt_tokens = 0;
    this.total_tokens = 0;
    this.batchSize = options.batchSize || 4;
  }
  /**
   * Generate embeddings (internal)
   * @param text
   * @returns
   */
  async generateEmbeddingCore(text: string | string[]) {
    const res = await this.openai.embeddings.create({
      model: this.embeddingModel,
      input: text,
      encoding_format: 'float',
    });
    this.prompt_tokens += res.usage.prompt_tokens;
    this.total_tokens += res.usage.total_tokens;
    return res;
  }
  async generateEmbeddingBatchCore(text: string[]) {
    const res = await this.openai.embeddings.create({
      model: this.embeddingModel,
      input: text,
      encoding_format: 'float',
    });
    this.prompt_tokens += res.usage.prompt_tokens;
    this.total_tokens += res.usage.total_tokens;
    return res.data.map((item) => item.embedding);
  }
  /**
   * Generate embeddings
   * @param text
   * @returns
   */
  async generateEmbedding(text: string | string[]) {
    if (Array.isArray(text)) {
      // the total token count must not exceed 8192
      const allSize = text.reduce((acc, item) => acc + numTokensFromString(item), 0);
      if (allSize > 8192) {
        throw new Error('text size 不能超过 8192');
      }
    }
    const res = await this.generateEmbeddingCore(text);
    if (Array.isArray(text)) {
      return res.data.map((item) => item.embedding);
    }
    return [res.data[0].embedding];
  }
  /**
   * Generate embeddings in batches
   * @param textArray
   * @returns
   */
  async generateEmbeddingBatch(textArray: string[]) {
    const batchSize = this.batchSize || 4;
    const embeddings: number[][] = [];
    for (let i = 0; i < textArray.length; i += batchSize) {
      const batch = textArray.slice(i, i + batchSize);
      const res = await this.generateEmbeddingBatchCore(batch);
      embeddings.push(...res);
    }
    return embeddings;
  }
  /**
   * Split long text and generate the corresponding embeddings
   * @param text
   * @returns
   */
  async splitLongText(text: string) {
    // Split the text
    const chunks: string[] = [];
    let startIndex = 0;

    while (startIndex < text.length) {
      // Compute the end position of the current chunk
      const endIndex = Math.min(startIndex + this.splitSize, text.length);

      // Extract the current chunk
      const chunk = text.substring(startIndex, endIndex);
      chunks.push(chunk);

      // Move to the next start position, accounting for the overlap
      startIndex = endIndex - this.splitOverlap;

      // Stop if the next start position is past or too close to the end of the text
      if (startIndex >= text.length - this.splitOverlap) {
        break;
      }
    }

    // Generate an embedding for each chunk
    const embeddings = await this.generateEmbeddingBatch(chunks);
    // Return the text fragments together with their embeddings
    return chunks.map((chunk, index) => ({
      text: chunk,
      embedding: embeddings[index],
    }));
  }
}
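
A sketch of how the splitting-and-embedding pipeline above could be driven end to end. The Ollama-style baseURL and model names mirror the test files later in this commit and are assumptions, not requirements:

import { Knowledge } from '@/provider/knowledge/knowledge.ts';

const kb = new Knowledge({
  embeddingModel: 'bge-m3:latest',            // assumed embedding model
  baseURL: 'https://ollama.xiongxiao.me/v1',  // same endpoint as the tests below
  model: 'qwq:latest',
  apiKey: process.env.OLLAMA_API_KEY,
});

// ~2000-char chunks with a 200-char overlap, embedded in batches of 4 (the constructor defaults).
const longDocumentText = 'Some long document text. '.repeat(500);
const pieces = await kb.splitLongText(longDocumentText);
console.log(pieces.length, pieces[0].embedding.length, kb.total_tokens);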

7  src/provider/knowledge/knowledge.ts  Normal file
@@ -0,0 +1,7 @@
import { KnowledgeBase, KnowledgeOptions } from './knowledge-base.ts';

export class Knowledge extends KnowledgeBase {
  constructor(options: KnowledgeOptions) {
    super(options);
  }
}

86  src/provider/utils/chunk.ts  Normal file
@@ -0,0 +1,86 @@
import { numTokensFromString } from './token.ts';

// Constants
const CHUNK_SIZE = 512; // maximum number of tokens per chunk
const MAGIC_SEPARATOR = '🦛';
const DELIMITER = [',', '.', '!', '?', '\n', ',', '。', '!', '?'];
const PARAGRAPH_DELIMITER = '\n\n';

export interface Chunk {
  chunkId: number;
  text: string;
  tokens: number;
}

/**
 * Ensure that each chunk stays within the maximum token count
 * @param chunk input text block
 * @returns array of [text, tokenCount] pairs after splitting
 */
function ensureChunkSize(chunk: string): Array<[string, number]> {
  const tokens = numTokensFromString(chunk);
  if (tokens <= CHUNK_SIZE) {
    return [[chunk, tokens]];
  }

  // Insert the magic separator after every delimiter
  let processedChunk = chunk;
  for (const delimiter of DELIMITER) {
    // Escape regex special characters
    const escapedDelimiter = delimiter.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    processedChunk = processedChunk.replace(new RegExp(escapedDelimiter, 'g'), delimiter + MAGIC_SEPARATOR);
  }

  const chunks: Array<[string, number]> = [];
  let tail = '';

  // Walk the text in CHUNK_SIZE steps
  for (let i = 0; i < processedChunk.length; i += CHUNK_SIZE) {
    const sentences = (processedChunk.slice(i, i + CHUNK_SIZE) + ' ').split(MAGIC_SEPARATOR);
    const currentChunk = tail + sentences.slice(0, -1).join('');
    if (currentChunk.trim()) {
      const tokenCount = numTokensFromString(currentChunk);
      chunks.push([currentChunk, tokenCount]);
    }
    tail = sentences[sentences.length - 1].trim();
  }

  // Handle whatever is left in the tail
  if (tail) {
    const tokenCount = numTokensFromString(tail);
    chunks.push([tail, tokenCount]);
  }

  return chunks;
}

/**
 * Split text into chunks
 * @param text input text
 * @returns array of chunks
 */
export async function getChunks(text: string): Promise<Chunk[]> {
  // Split the text into paragraphs
  const paragraphs = text
    .split(PARAGRAPH_DELIMITER)
    .map((p) => p.trim())
    .filter((p) => p);

  const chunks: Chunk[] = [];
  let currentIndex = 0;

  // Process each paragraph
  for (const paragraph of paragraphs) {
    const splittedParagraph = ensureChunkSize(paragraph);
    for (const [text, tokens] of splittedParagraph) {
      chunks.push({
        chunkId: currentIndex,
        text,
        tokens,
      });
      currentIndex++;
    }
  }

  return chunks;
}
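
A quick sketch of what getChunks returns for a small multi-paragraph input (output shape only; the actual token counts depend on the tiktoken encoding):

import { getChunks } from '@/provider/utils/chunk.ts';

const sample = 'First paragraph, short enough to stay in one chunk.\n\nSecond paragraph.';
const chunks = await getChunks(sample);
// e.g. [ { chunkId: 0, text: 'First paragraph, ...', tokens: 11 }, { chunkId: 1, ... } ]
console.log(chunks);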

141  src/provider/utils/parse-config.ts  Normal file
@@ -0,0 +1,141 @@
import { AES, enc } from 'crypto-js';

// Encryption helper
export function encryptAES(plainText: string, secretKey: string) {
  return AES.encrypt(plainText, secretKey).toString();
}

// Decryption helper
export function decryptAES(cipherText: string, secretKey: string) {
  const bytes = AES.decrypt(cipherText, secretKey);
  return bytes.toString(enc.Utf8);
}

type AIModel = {
  /**
   * Provider
   */
  provider: string;
  /**
   * Model name
   */
  model: string;
  /**
   * Model group
   */
  group: string;
  /**
   * Daily limit
   */
  dayLimit?: number;
};

type SecretKey = {
  /**
   * Group
   */
  group: string;
  /**
   * API key
   */
  apiKey: string;
  /**
   * Decryption key
   */
  decryptKey?: string;
};

export type GetProviderOpts = {
  model: string;
  group: string;
  decryptKey?: string;
};
export type ProviderResult = {
  provider: string;
  model: string;
  group: string;
  apiKey: string;
  dayLimit?: number;
  baseURL?: string;
  /**
   * Decryption key
   */
  decryptKey?: string;
};

export type AIConfig = {
  title?: string;
  description?: string;
  models: AIModel[];
  secretKeys: SecretKey[];
};
export class AIConfigParser {
  private config: AIConfig;
  result: ProviderResult;
  constructor(config: AIConfig) {
    this.config = config;
  }

  getProvider(opts: GetProviderOpts): ProviderResult {
    const { model, group, decryptKey } = opts;
    const modelConfig = this.config.models.find((m) => m.model === model && m.group === group);
    const groupConfig = this.config.secretKeys.find((m) => m.group === group);
    if (!modelConfig) {
      throw new Error(`在模型组 ${group} 中未找到模型 ${model}`);
    }
    const mergeConfig = {
      ...modelConfig,
      ...groupConfig,
      decryptKey: decryptKey || groupConfig?.decryptKey,
    };
    // Validate the merged model configuration
    if (!mergeConfig.provider) {
      throw new Error(`模型 ${model} 未配置提供商`);
    }
    if (!mergeConfig.model) {
      throw new Error(`模型 ${model} 未配置模型名称`);
    }
    if (!mergeConfig.apiKey) {
      throw new Error(`组 ${group} 未配置 API 密钥`);
    }
    if (!mergeConfig.group) {
      throw new Error(`组 ${group} 未配置`);
    }
    this.result = mergeConfig;
    return mergeConfig;
  }

  async getSecretKey({
    getCache,
    setCache,
    providerResult,
  }: {
    getCache?: (key: string) => Promise<string>;
    setCache?: (key: string, value: string) => Promise<void>;
    providerResult?: ProviderResult;
  }) {
    const { apiKey, decryptKey, group = '', model } = providerResult || this.result;
    const cacheKey = `${group}--${model}`;
    if (!decryptKey) {
      return apiKey;
    }
    if (getCache) {
      const cache = await getCache(cacheKey);
      if (cache) {
        return cache;
      }
    }
    const secretKey = decryptAES(apiKey, decryptKey);
    if (setCache) {
      await setCache(cacheKey, secretKey);
    }
    return secretKey;
  }
  encrypt(plainText: string, secretKey: string) {
    return encryptAES(plainText, secretKey);
  }

  decrypt(cipherText: string, secretKey: string) {
    return decryptAES(cipherText, secretKey);
  }
}
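
A sketch of the intended round trip through AIConfigParser, assuming a hand-written AIConfig; the group/model names, the plain-text key, and the in-memory cache are all illustrative:

import { AIConfigParser, encryptAES, AIConfig } from '@/provider/utils/parse-config.ts';

const config: AIConfig = {
  models: [{ provider: 'DeepSeek', model: 'deepseek-r1-250120', group: 'deepseek' }],
  secretKeys: [{ group: 'deepseek', apiKey: encryptAES('sk-raw-key', 'my-secret'), decryptKey: 'my-secret' }],
};

const parser = new AIConfigParser(config);
const providerResult = parser.getProvider({ model: 'deepseek-r1-250120', group: 'deepseek' });

// getSecretKey decrypts the stored apiKey and memoizes it through the optional cache hooks.
const cache = new Map<string, string>();
const apiKey = await parser.getSecretKey({
  getCache: async (key) => cache.get(key) ?? '',
  setCache: async (key, value) => {
    cache.set(key, value);
  },
  providerResult,
});
// apiKey === 'sk-raw-key'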

34  src/provider/utils/token.ts  Normal file
@@ -0,0 +1,34 @@
import { encoding_for_model, get_encoding } from 'tiktoken';


const MODEL_TO_ENCODING = {
  'gpt-4': 'cl100k_base',
  'gpt-4-turbo': 'cl100k_base',
  'gpt-3.5-turbo': 'cl100k_base',
  'text-embedding-ada-002': 'cl100k_base',
  'text-davinci-002': 'p50k_base',
  'text-davinci-003': 'p50k_base',
} as const;

export function numTokensFromString(text: string, model: keyof typeof MODEL_TO_ENCODING = 'gpt-3.5-turbo'): number {
  try {
    // Use the model-specific encoder for known models
    const encoder = encoding_for_model(model);
    const tokens = encoder.encode(text);
    const tokenCount = tokens.length;
    encoder.free(); // release the encoder
    return tokenCount;
  } catch (error) {
    try {
      // If the model-specific encoder fails, fall back to the base encoding
      const encoder = get_encoding(MODEL_TO_ENCODING[model]);
      const tokens = encoder.encode(text);
      const tokenCount = tokens.length;
      encoder.free(); // release the encoder
      return tokenCount;
    } catch (error) {
      // If encoding fails entirely, use a rough estimate: about 0.25 tokens per character
      return Math.ceil(text.length * 0.25);
    }
  }
}
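
For example, counting tokens before hitting the 8192 ceiling that KnowledgeBase.generateEmbedding enforces above (the counts shown are indicative only):

import { numTokensFromString } from '@/provider/utils/token.ts';

const text = 'Hello world, 你好';
console.log(numTokensFromString(text));          // default gpt-3.5-turbo (cl100k_base) encoding
console.log(numTokensFromString(text, 'gpt-4')); // same encoding family, so the same count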

11  src/routes/ai-chat/index.ts  Normal file
@@ -0,0 +1,11 @@
import { app } from '@/app.ts';

app
  .route({
    path: 'ai',
    key: 'chat',
  })
  .define(async () => {
    //
  })
  .addTo(app);

91  src/routes/ai-chat/services/chat-services.ts  Normal file
@@ -0,0 +1,91 @@
import { AIConfigParser, ProviderResult } from '@/provider/utils/parse-config.ts';
import { ProviderManager, ChatMessage, BaseChat } from '@/provider/index.ts';
import { getChatConfig } from '@/modules/chat-config.ts';
import { redis } from '@/modules/db.ts';
export type ChatServicesConfig = {
  username: string;
  model: string;
  group: string;
  decryptKey?: string;
};
export class ChatServices {
  cachePrefix = 'ai-chat:model:';
  /**
   * Username
   */
  username: string;
  /**
   * Model
   */
  model: string;
  /**
   * Group
   */
  group: string;
  /**
   * Decryption key
   */
  decryptKey?: string;
  /**
   * Model configuration
   */
  modelConfig?: ProviderResult;
  chatProvider?: BaseChat;
  constructor(opts: ChatServicesConfig) {
    this.username = opts.username;
    this.model = opts.model;
    this.group = opts.group;
    this.decryptKey = opts.decryptKey;
  }
  /**
   * Initialize: resolve the provider config and the (decrypted) API key
   * @returns
   */
  async init() {
    const config = await this.getConfig();
    const aiConfigParser = new AIConfigParser(config);
    const model = this.model;
    const group = this.group;
    const decryptKey = this.decryptKey;
    const providerResult = aiConfigParser.getProvider({ model, group, decryptKey });
    const that = this;
    const apiKey = await aiConfigParser.getSecretKey({
      getCache: async (key) => {
        const cache = await redis.get(that.wrapperKey(key));
        return cache;
      },
      setCache: async (key, value) => {
        await redis.set(that.wrapperKey(key), value);
      },
    });
    that.modelConfig = { ...providerResult, apiKey };
    return that.modelConfig;
  }
  wrapperKey(key: string) {
    const username = this.username;
    return `${this.cachePrefix}${username}:${key}`;
  }
  async getConfig() {
    return getChatConfig();
  }

  async chat(messages: ChatMessage[]) {
    const { model, provider, apiKey, baseURL } = this.modelConfig;
    const providerManager = await ProviderManager.createProvider({
      provider: provider,
      model: model,
      apiKey: apiKey,
      baseURL: baseURL,
    });
    this.chatProvider = providerManager;
    const result = await providerManager.chat(messages);
    return result;
  }
  static async createServices(opts: Partial<ChatServicesConfig>) {
    const username = opts.username || 'root';
    const model = opts.model || 'deepseek-r1-250120';
    const group = opts.group || 'deepseek';
    const decryptKey = opts.decryptKey;
    return new ChatServices({ username, model, group, decryptKey });
  }
}
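
A sketch of how the service above is meant to be used once getChatConfig (config.json) and redis are wired up; the defaults mirror createServices and the message literal is illustrative:

import { ChatServices } from '@/routes/ai-chat/services/chat-services.ts';

const services = await ChatServices.createServices({ username: 'root' }); // falls back to deepseek-r1-250120 / deepseek
await services.init();                                                    // resolves the provider and the decrypted apiKey, caching it in redis
const res = await services.chat([{ role: 'user', content: '你好' }]);
console.log(res.choices[0]?.message?.content);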
@@ -1,78 +0,0 @@
import { sequelize } from '@kevisual/code-center-module';
import { DataTypes, Model } from 'sequelize';
export type Provider = Partial<InstanceType<typeof ProviderModel>>;

type ModelItem = {
  /**
   * Model
   */
  model: string;
  /**
   * Provider
   */
  provider: string;
  /**
   * Configuration specific to this model
   */
  config: Record<string, any>;
  /**
   * Title
   */
  title: string;
  /**
   * Description
   */
  description: string;
};
export type ProviderData = {
  models: ModelItem[];
  config: Record<string, any>; // shared configuration
};
export class ProviderModel extends Model {
  declare id: string;
  declare title: string;
  declare description: string;

  declare config: Record<string, any>;
  declare data: Record<string, any>;

  declare uid: string;
  declare createdAt: Date;
  declare updatedAt: Date;
}

ProviderModel.init(
  {
    id: {
      type: DataTypes.UUID,
      primaryKey: true,
      defaultValue: DataTypes.UUIDV4,
      comment: 'id',
    },
    title: {
      type: DataTypes.TEXT,
      defaultValue: '',
    },
    description: {
      type: DataTypes.TEXT,
      defaultValue: '',
    },
    config: {
      type: DataTypes.JSON,
      defaultValue: {},
    },
    data: {
      type: DataTypes.JSON,
      defaultValue: {},
    },
    uid: {
      type: DataTypes.UUID,
      allowNull: true,
    },
  },
  {
    sequelize,
    tableName: 'kv_provider',
    paranoid: true,
  },
);

65  src/test/chunks/01-get.ts  Normal file
@@ -0,0 +1,65 @@
import { getChunks } from '../../provider/utils/chunk.ts';

const str = 'Hello world this is a test 你好沙盒 very big';


const str2 = `不能直接使用 tiktoken(OpenAI的分词器)来计算 Qwen 模型的 Token 数量,因为两者的分词规则(Tokenization)和词表(Vocabulary)完全不同。

为什么不能混用?
词表不同

tiktoken 是 OpenAI 为 GPT 系列设计的(如 gpt-3.5-turbo, gpt-4),其词表针对英语和代码优化。

Qwen 使用独立训练的 BPE 词表,对中文、多语言的支持更友好,分词粒度可能不同。

分词结果差异大
同一段文本,tiktoken 和 Qwen 的分词结果可能完全不同。例如:

OpenAI (tiktoken): "你好" → ['你', '好'](2 Tokens)

Qwen: "你好" → ['你好'](1 Token,如果词表中包含该组合)

性能问题
即使强制使用 tiktoken 计算 Qwen 的 Token,结果也不准确,可能导致:

输入超出模型上下文限制(因统计偏差)。

API 计费或本地推理时出现意外错误。

正确方法:用 Qwen 的分词器
通过 Hugging Face transformers 加载 Qwen 的原生分词器:

python
复制
from transformers import AutoTokenizer

# 加载 Qwen 的分词器(以 Qwen-7B 为例)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)

text = "你好,Qwen模型!"
tokens = tokenizer.tokenize(text)  # 查看分词结果
token_count = len(tokenizer.encode(text, add_special_tokens=False))

print("分词结果:", tokens)
print("Token数量:", token_count)
常见问题
为什么需要 trust_remote_code=True?
Qwen 的分词器是自定义实现的(非 Hugging Face 原生),此参数允许从模型仓库加载运行代码。

其他语言的 Token 计算?
Qwen 对非英语(如中文、日文)的分词效率较高,但仍需用其原生分词器统计。

与 tiktoken 的速度对比?
tiktoken 是纯 Python 实现,速度较快;Qwen 的分词器基于 Hugging Face,可能稍慢但对齐模型需求。

总结
禁止混用:tiktoken ≠ Qwen 分词器。

始终使用模型配套工具:Qwen 需通过 transformers 加载其官方分词器。

中文场景特别注意:Qwen 对中文的分词更高效,直接使用可避免偏差。

如果需要验证分词规则,可通过 tokenizer.vocab 查看词表内容(但注意词表通常较大)。`

// getChunks is async, so resolve the promise instead of logging a pending Promise
getChunks(str2).then((chunks) => console.log(chunks));

37  src/test/ollama-knowledge.ts  Normal file
@@ -0,0 +1,37 @@
import { Knowledge } from '../provider/knowledge/knowledge.ts';
import fs from 'fs';
import dotenv from 'dotenv';

dotenv.config();
const knowledge = new Knowledge({
  embeddingModel: 'bge-m3:latest',
  baseURL: 'https://ollama.xiongxiao.me/v1',
  model: 'qwq:latest',
  apiKey: process.env.OLLAMA_API_KEY,
});

const main = async () => {
  const res = await knowledge.generateEmbeddingCore('Hello world this is a test 你好沙盒 very big');
  fs.writeFileSync('docs/embedding.json', JSON.stringify(res, null, 2));
  console.log(res);
};

main();

const main2 = async () => {
  const text1 = 'Hello, world! this is a test';
  const text2 = 'Hello, world! this is a test 2';
  const text3 = 'Hello, world! this is a test 3';
  const text4 = 'Hello, world! this is a test 4';
  const text5 = 'Hello, world! this is a test 5';
  const text6 = 'Hello, world! this is a test 6';
  const text7 = 'Hello, world! this is a test 7';
  const text8 = 'Hello, world! this is a test 8';
  const text9 = 'Hello, world! this is a test 9';
  const text10 = 'Hello, world! this is a test 10';
  const res = await knowledge.generateEmbeddingCore([text1, text2, text3, text4, text5, text6, text7, text8, text9, text10]);
  fs.writeFileSync('docs/embedding2.json', JSON.stringify(res, null, 2));
  console.log(res);
};

// main2();

86  src/test/ollama.ts  Normal file
@@ -0,0 +1,86 @@
import { Ollama } from '../provider/chat-adapter/ollama.ts';
import util from 'util';
const chat = new Ollama({
  baseURL: 'https://ollama.xiongxiao.me/v1',
  apiKey: 'xiongxiao2233',
  model: 'qwq:latest',
});

// chat.chat([{ role: 'user', content: 'Hello, world!' }]);

const main = async () => {
  const res = await chat.test();
  console.log(util.inspect(res, { depth: null, colors: true }));
};

// main();

const getJson = async () => {
  const res = await chat.chat(
    [
      { role: 'system', content: '把发送的数据,返回给我对应的json,只处理完发送的数据。如果发送了多个,给我一个数组' },
      // { role: 'user', content: '{"name":"John","age":30}' },
      { role: 'user', content: 'name: 张三' },
      { role: 'user', content: 'name: 李四, age: 18' },
    ],
    {
      response_format: {
        type: 'json_schema',
        json_schema: {
          name: 'user',
          description: '用户信息',
          schema: {
            type: 'object',
            // properties: {
            //   name: { type: 'string' },
            //   // age: { type: 'number' },
            // },
            // // required: ['name', 'age'],
            // required: ['name'],
            properties: {
              name: { type: 'string' },
              age: { type: 'number' },
            },
            required: ['name', 'age'],
          },
        },
      },
      n: 10,
    },
  );
  console.log(util.inspect(res, { depth: null, colors: true }));
};

// getJson();

const createChat1 = async () => {
  const res = await chat.chat(
    [
      { role: 'user', content: 'a=1, b=2, c=3' },
      { role: 'user', content: 'a+b+c=?' },
      { role: 'assistant', content: '给定的值为 \\( a = 1 \\), \\( b = 2 \\), \\( c = 3 \\)。\n' + '\n' + '因此,\\( a + b + c = 1 + 2 + 3 = 6 \\)。' },
      { role: 'user', content: 'a+b+c+4=?' },
    ],
    {
      model: 'qwen2.5:7b',
    },
  );
  console.log(util.inspect(res, { depth: null, colors: true }));
};

//  createChat1();

const getTags = async () => {
  const res = await chat.listModels();
  console.log(util.inspect(res, { depth: null, colors: true }));
};

// getTags();

const getRunModels = async () => {
  const res = await chat.listRunModels();
  console.log('current', new Date().toISOString());
  console.log(util.inspect(res, { depth: null, colors: true }));
};

// getRunModels();

15  src/test/siliconflow/get.ts  Normal file
@@ -0,0 +1,15 @@
import { SiliconFlow } from '../../provider/chat-adapter/siliconflow.ts';
import dotenv from 'dotenv';

dotenv.config();
const siliconflow = new SiliconFlow({
  apiKey: process.env.SILICONFLOW_API_KEY,
  model: 'Qwen/Qwen2-7B-Instruct',
});

const main = async () => {
  const usage = await siliconflow.getUsage();
  console.log(usage);
};

main();

9  vite.config.mjs  Normal file
@@ -0,0 +1,9 @@
import { defineConfig } from 'vite';
import basicSsl from '@vitejs/plugin-basic-ssl';

export default defineConfig({
  plugins: [basicSsl()],
  server: {
    port: 3000,
  },
});