Skip to content

Commit

Permalink
refactor(i18n): adapt new fields in Kafka connector and source
Browse files Browse the repository at this point in the history
  • Loading branch information
Kinplemelon authored and ysfscream committed Jul 29, 2024
1 parent 7beb337 commit 96355be
Show file tree
Hide file tree
Showing 6 changed files with 9 additions and 0 deletions.
1 change: 1 addition & 0 deletions packages/i18n/lib/enActionsLabel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,7 @@ export const enActionsLabel: Record<string, Record<string, string>> = {
offset_reset_policy: 'Offset Reset Policy',
offset_commit_interval_seconds: 'Offset Commit Interval',
topic: 'Kafka Topic',
group_id: 'Group ID',
},
rabbitmq: {
exchange: 'Exchange',
Expand Down
1 change: 1 addition & 0 deletions packages/i18n/lib/enConnectorsLabel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ export const enConnectorsLabel: Record<string, Record<string, string>> = {
url: 'Server URL',
authentication: 'Authentication',
disable_prepared_statements: 'Disable Prepared Statements',
health_check_topic: 'Health Check Topic',
},
kafka_producer: {
kerberos_principal: 'Kerberos Principal',
Expand Down
3 changes: 3 additions & 0 deletions packages/i18n/lib/enIntegrationDesc.ts
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ export const enIntegrationDesc: Record<string, Record<string, string>> = {
'Applicable when buffer mode is set to <code>memory</code><br/>EMQX will drop old buffered messages under high memory pressure. The high memory threshold is defined in config <code>sysmon.os.sysmem_high_watermark</code>. NOTE: This config only works on Linux.',
disable_prepared_statements:
'Disables the usage of prepared statements in the connections. Some endpoints, like PGBouncer or Supabase in Transaction mode, do not support session features such as prepared statements. For such connections, this option should be enabled.',
health_check_topic: 'Topic name used exclusively for more accurate health checks.',
},
mqtt: {
bridge_mode:
Expand Down Expand Up @@ -240,6 +241,8 @@ export const enIntegrationDesc: Record<string, Record<string, string>> = {
offset_commit_interval_seconds:
'Defines the time interval between two offset commit requests sent for each consumer group.',
topic: 'Kafka topic',
group_id:
'Consumer group identifier to be used for this source. If omitted, one based on the source name will be automatically generated.',
},
mongodb: {
collection: 'The collection where data will be stored into',
Expand Down
1 change: 1 addition & 0 deletions packages/i18n/lib/zhActionsLabel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,7 @@ export const zhActionsLabel: Record<string, Record<string, string>> = {
offset_reset_policy: '偏移重置策略',
offset_commit_interval_seconds: '偏移提交间隔',
topic: 'Kafka 主题名称',
group_id: '消费组 ID',
},
rabbitmq: {
exchange: '交换机',
Expand Down
1 change: 1 addition & 0 deletions packages/i18n/lib/zhConnectorsLabel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ export const zhConnectorsLabel: Record<string, Record<string, string>> = {
url: '服务器地址',
authentication: '认证',
disable_prepared_statements: '禁用预处理语句',
health_check_topic: '健康检查主题',
},
kafka_producer: {
kerberos_principal: 'Kerberos Principal',
Expand Down
2 changes: 2 additions & 0 deletions packages/i18n/lib/zhIntegrationDesc.ts
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ export const zhIntegrationDesc: Record<string, Record<string, string>> = {
'在缓冲区模式设置为 <code>memory</code> 时适用\n在内存压力较大时,EMQX 将删除旧的缓冲消息。高内存阈值在配置<code>sysmon.os.sysmem_high_watermark</code>中定义。注意:此配置仅适用于 Linux。',
disable_prepared_statements:
'在连接中禁用预处理语句。某些端点(如事务模式下的 PGBouncer 或 Supabase)不支持会话功能(如预处理语句)。对于此类连接,应启用此选项。',
health_check_topic: '专用于精确检查健康状态的主题名称。',
},
mqtt: {
bridge_mode:
Expand Down Expand Up @@ -219,6 +220,7 @@ export const zhIntegrationDesc: Record<string, Record<string, string>> = {
'设置每次从 Kafka 拉取数据的字节数。如该配置小于 Kafka 消息的大小,可能会影响消费性能。',
offset_commit_interval_seconds: '指定 Kafka 消费组偏移量提交的时间间隔。',
topic: 'Kafka 主题名称',
group_id: '用于此 Source 的消费者组 ID。如果未指定,系统将自动生成一个基于 Source 名称的 ID。',
},
mongodb: {
collection: '数据将被存储到的集合',
Expand Down

0 comments on commit 96355be

Please sign in to comment.