Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: 修复tokens描述错误的缺陷 #1192

Merged
merged 1 commit into from
Sep 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion apps/dataset/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
path('dataset/<str:dataset_id>/document/_bach', views.Document.Batch.as_view()),
path('dataset/<str:dataset_id>/document/batch_hit_handling', views.Document.BatchEditHitHandling.as_view()),
path('dataset/<str:dataset_id>/document/<int:current_page>/<int:page_size>', views.Document.Page.as_view()),
path('dataset/<str:dataset_id>/document/batch_refresh', views.Document.BatchRefresh.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>', views.Document.Operate.as_view(),
name="document_operate"),
path('dataset/document/split', views.Document.Split.as_view(),
Expand All @@ -34,7 +35,6 @@
name="document_export"),
path('dataset/<str:dataset_id>/document/<str:document_id>/sync', views.Document.SyncWeb.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/refresh', views.Document.Refresh.as_view()),
path('dataset/<str:dataset_id>/document/batch_refresh', views.Document.BatchRefresh.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/paragraph', views.Paragraph.as_view()),
path(
'dataset/<str:dataset_id>/document/<str:document_id>/paragraph/migrate/dataset/<str:target_dataset_id>/document/<str:target_document_id>',
Expand Down
2 changes: 1 addition & 1 deletion apps/dataset/views/document.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,7 +239,7 @@ def put(self, request: Request, dataset_id: str, document_id: str):
class BatchRefresh(APIView):
authentication_classes = [TokenAuth]

@action(methods=['POST'], detail=False)
@action(methods=['PUT'], detail=False)
@swagger_auto_schema(operation_summary="批量刷新文档向量库",
operation_id="批量刷新文档向量库",
request_body=
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class BedrockLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class AzureLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class DeepSeekLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class GeminiLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class KimiLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ class OllamaLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class OpenAILLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class QwenModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=2048,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class VLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class VolcanicEngineLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class WenxinLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=2,
_max=2048,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class XunFeiLLMModelGeneralParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=4096,
_min=1,
_max=4096,
Expand All @@ -42,7 +42,7 @@ class XunFeiLLMModelProParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=4096,
_min=1,
_max=8192,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class XinferenceLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class ZhiPuLLMModelParams(BaseForm):
precision=2)

max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,
Expand Down
89 changes: 52 additions & 37 deletions ui/src/api/document.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ import { get, post, del, put, exportExcel } from '@/request/index'
import type { Ref } from 'vue'
import type { KeyValue } from '@/api/type/common'
import type { pageRequest } from '@/api/type/common'

const prefix = '/dataset'

/**
Expand All @@ -26,14 +27,14 @@ const listSplitPattern: (

/**
* 文档分页列表
* @param 参数 dataset_id,
* @param 参数 dataset_id,
* page {
"current_page": "string",
"page_size": "string",
}
* param {
"name": "string",
}
"current_page": "string",
"page_size": "string",
}
* param {
"name": "string",
}
*/

const getDocument: (
Expand All @@ -58,22 +59,22 @@ const getAllDocument: (dataset_id: string, loading?: Ref<boolean>) => Promise<Re

/**
* 创建批量文档
* @param 参数
* @param 参数
* {
"name": "string",
"paragraphs": [
{
"content": "string",
"title": "string",
"problem_list": [
{
"id": "string",
"content": "string"
}
]
}
]
}
"name": "string",
"paragraphs": [
{
"content": "string",
"title": "string",
"problem_list": [
{
"id": "string",
"content": "string"
}
]
}
]
}
*/
const postDocument: (
dataset_id: string,
Expand All @@ -85,13 +86,13 @@ const postDocument: (

/**
* 修改文档
* @param 参数
* dataset_id, document_id,
* @param 参数
* dataset_id, document_id,
* {
"name": "string",
"is_active": true,
"meta": {}
}
"name": "string",
"is_active": true,
"meta": {}
}
*/
const putDocument: (
dataset_id: string,
Expand Down Expand Up @@ -124,6 +125,19 @@ const delMulDocument: (
) => Promise<Result<boolean>> = (dataset_id, data, loading) => {
return del(`${prefix}/${dataset_id}/document/_bach`, undefined, { id_list: data }, loading)
}

/**
 * Batch re-vectorize documents in a dataset.
 * Issues a PUT to the dataset's batch_refresh endpoint with the
 * selected document ids wrapped as `{ id_list }`.
 *
 * @param dataset_id - id of the dataset owning the documents
 * @param data - array of document ids to refresh
 * @param loading - optional reactive loading flag toggled by the request helper
 * @returns promise resolving to the API result wrapper
 */
const batchRefresh: (
  dataset_id: string,
  data: any,
  loading?: Ref<boolean>
) => Promise<Result<boolean>> = (dataset_id, data, loading) => {
  const url = `${prefix}/${dataset_id}/document/batch_refresh`
  const body = { id_list: data }
  return put(url, body, undefined, loading)
}
/**
* 文档详情
* @param 参数 dataset_id
Expand Down Expand Up @@ -180,14 +194,14 @@ const delMulSyncDocument: (

/**
* 创建Web站点文档
* @param 参数
* @param 参数
* {
"source_url_list": [
"string"
],
"selector": "string"
"source_url_list": [
"string"
],
"selector": "string"
}
}
}
*/
const postWebDocument: (
dataset_id: string,
Expand All @@ -199,9 +213,9 @@ const postWebDocument: (

/**
* 导入QA文档
* @param 参数
* @param 参数
* file
}
}
*/
const postQADocument: (
dataset_id: string,
Expand Down Expand Up @@ -323,5 +337,6 @@ export default {
exportTableTemplate,
postQADocument,
postTableDocument,
exportDocument
exportDocument,
batchRefresh
}
13 changes: 12 additions & 1 deletion ui/src/views/application/component/AccessSettingDrawer.vue
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,10 @@
</el-input>
</el-form-item>
</template>
<div v-if="configType === 'wechat'" class="flex align-center" style="margin-bottom: 8px">
<span class="el-form-item__label">是否是订阅号</span>
<el-switch v-if="configType === 'wechat'" v-model="form[configType].is_personal" />
</div>

<h4 class="title-decoration-1 mb-16">回调地址</h4>
<el-form-item label="URL" prop="callback_url">
Expand Down Expand Up @@ -102,7 +106,14 @@ const {
} = route as any

const form = reactive<any>({
wechat: { app_id: '', app_secret: '', token: '', encoding_aes_key: '', callback_url: '' },
wechat: {
app_id: '',
app_secret: '',
token: '',
encoding_aes_key: '',
is_personal: false,
callback_url: ''
},
dingtalk: { client_id: '', client_secret: '', callback_url: '' },
wecom: {
app_id: '',
Expand Down
16 changes: 16 additions & 0 deletions ui/src/views/document/index.vue
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@
<el-button @click="openDatasetDialog()" :disabled="multipleSelection.length === 0">
迁移
</el-button>
<el-button @click="batchRefresh" :disabled="multipleSelection.length === 0">
重新向量化
</el-button>
<el-button @click="openBatchEditDocument" :disabled="multipleSelection.length === 0">
设置
</el-button>
Expand Down Expand Up @@ -538,6 +541,19 @@ function deleteMulDocument() {
})
}

function batchRefresh() {
  // Gather the ids of every (truthy) selected row, then ask the API to
  // re-vectorize them in one batch call.
  const idList: string[] = multipleSelection.value.filter((v) => v).map((v) => v.id)
  documentApi.batchRefresh(id, idList, loading).then(() => {
    MsgSuccess('批量重新向量化成功')
    // Clear the table selection so the action cannot be re-fired on stale rows.
    multipleTableRef.value?.clearSelection()
  })
}

function deleteDocument(row: any) {
MsgConfirm(
`是否删除文档:${row.name} ?`,
Expand Down
Loading