From bcdbc4fb8aefef387aadcf07646664a2d0d3f92d Mon Sep 17 00:00:00 2001
From: orenzhang
Date: Fri, 28 Feb 2025 17:47:27 +0800
Subject: [PATCH] i18n(zh-cn): add chinese translation

---
 src/lib/i18n/locales/zh-CN/translation.json | 60 ++++++++++-----------
 1 file changed, 30 insertions(+), 30 deletions(-)

diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json
index c3fe7c09e..f46835953 100644
--- a/src/lib/i18n/locales/zh-CN/translation.json
+++ b/src/lib/i18n/locales/zh-CN/translation.json
@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "允许通话中的打断语音",
 	"Allowed Endpoints": "允许的端点",
 	"Already have an account?": "已经拥有账号了?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "top_p 的替代方法,旨在确保质量和多样性之间的平衡。参数 p 表示一个 Token 被考虑的最小概率(相对于最可能的 Token 的概率)。例如,当 p=0.05 且最可能的 Token 概率为 0.9 时,概率值小于 0.045 的 Token 将被过滤掉。",
 	"Always": "保持",
 	"Amazing": "很棒",
 	"an assistant": "一个助手",
@@ -86,7 +86,7 @@
 	"Archive All Chats": "归档所有对话记录",
 	"Archived Chats": "已归档对话",
 	"archived-chat-export": "导出已归档对话",
-	"Are you sure you want to clear all memories? This action cannot be undone.": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "是否确认清除所有记忆?清除后无法还原。",
 	"Are you sure you want to delete this channel?": "是否确认删除此频道?",
 	"Are you sure you want to delete this message?": "是否确认删除此消息?",
 	"Are you sure you want to unarchive all archived chats?": "是否确认取消所有已归档的对话?",
@@ -129,7 +129,7 @@
 	"Bocha Search API Key": "Bocha Search API 密钥",
 	"Brave Search API Key": "Brave Search API 密钥",
 	"By {{name}}": "由 {{name}} 提供",
-	"Bypass Embedding and Retrieval": "",
+	"Bypass Embedding and Retrieval": "绕过嵌入和检索",
 	"Bypass SSL verification for Websites": "绕过网站的 SSL 验证",
 	"Calendar": "日历",
 	"Call": "呼叫",
@@ -163,7 +163,7 @@
 	"Ciphers": "加密算法 (Ciphers)",
 	"Citation": "引文",
 	"Clear memory": "清除记忆",
-	"Clear Memory": "",
+	"Clear Memory": "清除记忆",
 	"click here": "点击此处",
 	"Click here for filter guides.": "点击此处查看 filter 指南。",
 	"Click here for help.": "点击这里获取帮助。",
@@ -208,19 +208,19 @@
 	"Confirm your new password": "确认新密码",
 	"Connect to your own OpenAI compatible API endpoints.": "连接到你自己的与 OpenAI 兼容的 API 接口端点。",
 	"Connections": "外部连接",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "约束推理模型的推理努力程度。仅适用于支持推理努力控制的特定提供商的推理模型。",
 	"Contact Admin for WebUI Access": "请联系管理员以获取访问权限",
 	"Content": "内容",
-	"Content Extraction Engine": "",
+	"Content Extraction Engine": "内容提取引擎",
 	"Context Length": "上下文长度",
 	"Continue Response": "继续生成",
 	"Continue with {{provider}}": "使用 {{provider}} 继续",
 	"Continue with Email": "使用邮箱登录",
 	"Continue with LDAP": "使用 LDAP 登录",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "控制消息文本如何拆分以用于 TTS 请求。“Punctuation”拆分为句子,“paragraphs”拆分为段落,“none”将消息保留为单个字符串。",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "控制生成文本中 Token 序列的重复程度。较高的值(例如 1.5)将更强烈地惩罚重复,而较低的值(例如 1.1)则更为宽松。当值为 1 时,此功能将被禁用。",
 	"Controls": "对话高级设置",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "控制输出文本中连贯性和多样性之间的平衡。较低的值将产生更加专注和连贯的文本。",
 	"Copied": "已复制",
 	"Copied shared chat URL to clipboard!": "已复制此对话分享链接至剪贴板!",
 	"Copied to clipboard": "已复制到剪贴板",
@@ -248,7 +248,7 @@
 	"Current Model": "当前模型",
 	"Current Password": "当前密码",
 	"Custom": "自定义",
-	"Danger Zone": "",
+	"Danger Zone": "危险区域",
 	"Dark": "暗色",
 	"Database": "数据库",
 	"December": "十二月",
@@ -346,7 +346,7 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "电子邮箱",
 	"Embark on adventures": "踏上冒险之旅",
-	"Embedding": "",
+	"Embedding": "嵌入",
 	"Embedding Batch Size": "嵌入层批处理大小 (Embedding Batch Size)",
 	"Embedding Model": "语义向量模型",
 	"Embedding Model Engine": "语义向量模型引擎",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "启用内存锁定(mlock)以防止模型数据被交换出RAM。此选项将模型的工作集页面锁定在RAM中,确保它们不会被交换到磁盘。这可以通过避免页面错误和确保快速数据访问来帮助维持性能。",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "启用内存映射(mmap)以加载模型数据。此选项允许系统通过将磁盘文件视为在RAM中来使用磁盘存储作为RAM的扩展。这可以通过更快的数据访问来提高模型性能。然而,它可能无法在所有系统上正常工作,并且可能会消耗大量磁盘空间。",
 	"Enable Message Rating": "启用回复评价",
-	"Enable Mirostat sampling for controlling perplexity.": "",
+	"Enable Mirostat sampling for controlling perplexity.": "启用 Mirostat 采样以控制困惑度。",
 	"Enable New Sign Ups": "允许新用户注册",
 	"Enabled": "启用",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "确保您的 CSV 文件按以下顺序包含 4 列: 姓名、电子邮箱、密码、角色。",
@@ -566,12 +566,12 @@
 	"Include": "包括",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api-auth` 参数",
 	"Include `--api` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api` 参数",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "影响算法对生成文本反馈的响应速度。较低的学习率将导致调整更慢,而较高的学习率将使算法反应更灵敏。",
 	"Info": "信息",
 	"Input commands": "输入命令",
 	"Install from Github URL": "从 Github URL 安装",
 	"Instant Auto-Send After Voice Transcription": "语音转录文字后即时自动发送",
-	"Integration": "",
+	"Integration": "集成",
 	"Interface": "界面",
 	"Invalid file format.": "无效文件格式。",
 	"Invalid Tag": "无效标签",
@@ -619,7 +619,7 @@
 	"Listening...": "正在倾听...",
 	"Llama.cpp": "Llama.cpp",
 	"LLMs can make mistakes. Verify important information.": "大语言模型可能会生成误导性错误信息,请对关键信息加以验证。",
-	"Loader": "",
+	"Loader": "加载器",
 	"Loading Kokoro.js...": "载入 Kokoro.js...",
 	"Local": "本地",
 	"Local Models": "本地模型",
@@ -697,7 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "未找到 HTML、CSS 或 JavaScript 内容。",
 	"No inference engine with management support found": "未找到支持管理的推理引擎",
 	"No knowledge found": "未找到知识",
-	"No memories to clear": "",
+	"No memories to clear": "记忆为空,无须清理",
 	"No model IDs": "没有模型 ID",
 	"No models found": "未找到任何模型",
 	"No models selected": "未选择任何模型",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "推理努力",
 	"Record voice": "录音",
 	"Redirecting you to Open WebUI Community": "正在将您重定向到 OpenWebUI 社区",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "降低生成无意义内容的概率。较高的值(如100)将产生更多样化的回答,而较低的值(如10)则更加保守。",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "使用\"User\" (用户) 来指代自己(例如:“User 正在学习西班牙语”)",
 	"References from": "来自",
 	"Refused when it shouldn't have": "无理拒绝",
@@ -835,7 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "无法激活回复时发送通知。请检查浏览器设置,并授予必要的访问权限。",
 	"Response splitting": "拆分回复",
 	"Result": "结果",
-	"Retrieval": "",
+	"Retrieval": "检索",
 	"Retrieval Query Generation": "检索查询生成",
 	"Rich Text Input for Chat": "对话富文本输入",
 	"RK": "排名",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "设置用于计算的工作线程数量。该选项可控制并发处理传入请求的线程数量。增加该值可以提高高并发工作负载下的性能,但也可能消耗更多的 CPU 资源。",
 	"Set Voice": "设置音色",
 	"Set whisper model": "设置 whisper 模型",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
-	"Sets how far back for the model to look back to prevent repetition.": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
-	"Sets the size of the context window used to generate the next token.": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "对至少出现过一次的 Token 设置固定偏置值。较高的值(例如 1.5)将更强烈地惩罚重复,而较低的值(例如 0.9)则更为宽松。当值为 0 时,此功能将被禁用。",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "根据 Token 出现的次数,设置一个缩放偏置值来惩罚重复。较高的值(例如 1.5)将更强烈地惩罚重复,而较低的值(例如 0.9)则更为宽松。当值为 0 时,此功能将被禁用。",
+	"Sets how far back for the model to look back to prevent repetition.": "设置模型回溯的范围,以防止重复。",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "设置用于生成的随机数种子。将其设置为特定数字将使模型针对同一提示生成相同的文本。",
+	"Sets the size of the context window used to generate the next token.": "设置用于生成下一个 Token 的上下文窗口的大小。",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "设置要使用的停止序列。遇到这种模式时,大语言模型将停止生成文本并返回。可以通过在模型文件中指定多个单独的停止参数来设置多个停止模式。",
 	"Settings": "设置",
 	"Settings saved successfully!": "设置已成功保存!",
@@ -964,7 +964,7 @@
 	"System Prompt": "系统提示词 (System Prompt)",
 	"Tags Generation": "标签生成",
 	"Tags Generation Prompt": "标签生成提示词",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "无尾采样用于减少输出中出现概率较小的 Token 的影响。较高的值(例如 2.0)将进一步减少影响,而值 1.0 则禁用此设置。",
 	"Talk to model": "与模型交谈",
 	"Tap to interrupt": "点击以中断",
 	"Tasks": "任务",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "感谢您的反馈!",
 	"The Application Account DN you bind with for search": "您所绑定用于搜索的 Application Account DN",
 	"The base to search for users": "搜索用户的 Base",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "批处理大小决定了一次可以处理多少个文本请求。更高的批处理大小可以提高模型的性能和速度,但也需要更多内存。",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "本插件的背后开发者是社区中热情的志愿者。如果此插件有帮助到您,烦请考虑一下为它的开发做出贡献。",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "排行榜基于 Elo 评级系统并实时更新。",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "映射到用户登录时使用的邮箱的 LDAP 属性。",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "最大文件大小(MB)。如果文件大小超过此限制,则无法上传该文件。",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "在单次对话中可以使用的最大文件数。如果文件数超过此限制,则文件不会上传。",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "分值应介于 0.0(0%)和 1.0(100%)之间。",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "模型的温度。增加温度将使模型的回答更有创意。",
 	"Theme": "主题",
 	"Thinking...": "正在思考...",
 	"This action cannot be undone. Do you wish to continue?": "此操作无法撤销。是否确认继续?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "这将确保您的宝贵对话被安全地保存到后台数据库中。感谢!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "这是一个实验功能,可能不会如预期那样工作,而且可能随时发生变化。",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此选项控制刷新上下文时保留多少 Token。例如,如果设置为 2,则将保留对话上下文的最后 2 个 Token。保留上下文有助于保持对话的连续性,但可能会降低响应新主题的能力。",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "此项用于设置模型在其响应中可以生成的最大 Token 数。增加此限制可让模型提供更长的答案,但也可能增加生成无用或不相关内容的可能性。",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "此选项将会删除文件集中所有文件,并用新上传的文件替换。",
 	"This response was generated by \"{{model}}\"": "此回复由 \"{{model}}\" 生成",
 	"This will delete": "这将删除",
@@ -1132,7 +1132,7 @@
 	"Why?": "为什么?",
 	"Widescreen Mode": "宽屏模式",
 	"Won": "获胜",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "与 top-k 配合使用。较高的值(例如 0.95)将产生更加多样化的文本,而较低的值(例如 0.5)将产生更加集中和保守的文本。",
 	"Workspace": "工作空间",
 	"Workspace Permissions": "工作空间权限",
 	"Write": "写作",
@@ -1155,6 +1155,6 @@
 	"Your account status is currently pending activation.": "您的账号当前状态为待激活。",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "您的全部捐款将直接给到插件开发者,Open WebUI 不会收取任何比例。但众筹平台可能会有服务费、抽成。",
 	"Youtube": "YouTube",
-	"Youtube Language": "",
-	"Youtube Proxy URL": ""
+	"Youtube Language": "YouTube 语言",
+	"Youtube Proxy URL": "YouTube 代理 URL"
 }