|
3 | 3 | "Run": "Execute", |
4 | 4 | "Team Tags Set": "Team tags", |
5 | 5 | "Team_Tags": "Team tags", |
| 6 | + "ai_point_price": "Billing", |
6 | 7 | "ai_settings": "AI Configuration", |
7 | 8 | "all_apps": "All Applications", |
8 | 9 | "app.Version name": "Version Name", |
|
19 | 20 | "auto_execute_tip": "After turning it on, the workflow will be automatically triggered when the user enters the conversation interface. \nExecution order: 1. Dialogue starter; 2. Global variables; 3. Automatic execution.", |
20 | 21 | "auto_save": "Auto save", |
21 | 22 | "chat_debug": "Chat Preview", |
22 | | - "chat_logs": "Conversation Logs", |
| 23 | + "chat_logs": "Logs", |
23 | 24 | "chat_logs_tips": "Logs will record the online, shared, and API (requires chatId) conversation records of this app.", |
| 25 | + "config_ai_model_params": "Click to configure AI model related properties", |
24 | 26 | "config_file_upload": "Click to Configure File Upload Rules", |
25 | 27 | "config_question_guide": "Configure suggested questions (guess what you want to ask)",
26 | 28 | "confirm_copy_app_tip": "The system will create an app with the same configuration for you, but permissions will not be copied. Please confirm!", |
|
68 | 70 | "interval.per_hour": "Every Hour", |
69 | 71 | "intro": "A comprehensive model application orchestration system that offers out-of-the-box data processing and model invocation capabilities. It allows for rapid Dataset construction and workflow orchestration through Flow visualization, enabling complex Dataset scenarios!", |
70 | 72 | "llm_not_support_vision": "This model does not support image recognition", |
71 | | - "llm_use_vision": "Enable Image Recognition", |
| 73 | + "llm_use_vision": "Vision", |
72 | 74 | "llm_use_vision_tip": "After clicking the model selector, you can see whether the model supports image recognition and control whether it is enabled. \nAfter enabling image recognition, the model will read the image content from file links, and if the user's question is less than 500 words, images in the question will be parsed automatically.",
73 | 75 | "logs_chat_user": "user", |
74 | 76 | "logs_empty": "No logs yet~", |
75 | 77 | "logs_message_total": "Total Messages", |
76 | 78 | "logs_title": "Title", |
| 79 | + "look_ai_point_price": "View all model billing standards", |
77 | 80 | "mark_count": "Number of Marked Answers", |
| 81 | + "max_histories_number": "Max histories", |
78 | 82 | "module.Custom Title Tip": "This title will be displayed during the conversation.", |
79 | 83 | "module.No Modules": "No Plugins Found", |
80 | 84 | "module.type": "\"{{type}}\" type\n{{description}}", |
|
96 | 100 | "plugin_cost_per_times": "{{cost}} points/time", |
97 | 101 | "plugin_dispatch": "Plugin Invocation", |
98 | 102 | "plugin_dispatch_tip": "Adds extra capabilities to the model. The specific plugins to be invoked will be autonomously decided by the model.\nIf a plugin is selected, the Dataset invocation will automatically be treated as a special plugin.", |
99 | | - "publish_channel": "Publish Channel", |
| 103 | + "publish_channel": "Publish", |
100 | 104 | "publish_success": "Publish Successful", |
101 | 105 | "question_guide_tip": "After the conversation, 3 guiding questions will be generated for you.", |
102 | 106 | "saved_success": "Save Successful", |
103 | 107 | "search_app": "Search Application", |
104 | | - "setting_app": "Application Settings", |
105 | | - "setting_plugin": "Plugin Settings", |
| 108 | + "setting_app": "Workflow", |
| 109 | + "setting_plugin": "Workflow", |
| 110 | + "stream_response": "Stream", |
| 111 | + "stream_response_tip": "When this switch is turned off, the model is forced to use non-streaming mode and content will not be output directly. \nThe content output by the model can instead be obtained from the AI reply output for secondary processing.",
106 | 112 | "template.hard_strict": "Strict Q&A template", |
107 | 113 | "template.hard_strict_des": "Based on the question and answer template, stricter requirements are imposed on the model's answers.", |
108 | 114 | "template.qa_template": "Q&A template", |
|
164 | 170 | "workflow.form_input_description_placeholder": "For example: \nAdd your information", |
165 | 171 | "workflow.form_input_tip": " This module can configure multiple inputs to guide users in entering specific content.", |
166 | 172 | "workflow.input_description_tip": "You can add a description to explain to users what they need to input", |
167 | | - "workflow.read_files": "Document Parsing", |
| 173 | + "workflow.read_files": "Document Parse", |
168 | 174 | "workflow.read_files_result": "Document Parsing Result", |
169 | 175 | "workflow.read_files_result_desc": "Original document text, consisting of file names and document content, separated by hyphens between multiple files.", |
170 | 176 | "workflow.read_files_tip": "Parse the documents uploaded in this round of dialogue and return the corresponding document content", |
|
175 | 181 | "workflow.template.communication": "Communication", |
176 | 182 | "workflow.user_file_input": "File Link", |
177 | 183 | "workflow.user_file_input_desc": "Links to documents and images uploaded by users.", |
178 | | - "workflow.user_select": "User Selection", |
| 184 | + "workflow.user_select": "User Select", |
179 | 185 | "workflow.user_select_tip": "This module can configure multiple options for selection during the dialogue. Different options can lead to different workflow branches." |
180 | 186 | } |
0 commit comments