Support filter restrict network interface (#14638)
1 parent 5a550dd, commit 2b99451. Showing 7 changed files with 128 additions and 80 deletions.
@@ -200,37 +200,38 @@ The common.properties configuration file is currently used mainly to configure hadoop/s3/yarn/applicationId

The default configuration is as follows:
| 参数 | 默认值 | 描述 | | ||
|--|--|--| | ||
|data.basedir.path | /tmp/dolphinscheduler | 本地工作目录,用于存放临时文件| | ||
|resource.storage.type | NONE | 资源文件存储类型: HDFS,S3,OSS,GCS,ABS,NONE| | ||
|resource.upload.path | /dolphinscheduler | 资源文件存储路径| | ||
|aws.access.key.id | minioadmin | S3 access key| | ||
|aws.secret.access.key | minioadmin | S3 secret access key| | ||
|aws.region | us-east-1 | S3 区域| | ||
|aws.s3.endpoint | http://minio:9000 | S3 endpoint地址| | ||
|hdfs.root.user | hdfs | 如果存储类型为HDFS,需要配置拥有对应操作权限的用户| | ||
|fs.defaultFS | hdfs://mycluster:8020 | 请求地址如果resource.storage.type=S3,该值类似为: s3a://dolphinscheduler. 如果resource.storage.type=HDFS, 如果 hadoop 配置了 HA,需要复制core-site.xml 和 hdfs-site.xml 文件到conf目录| | ||
|hadoop.security.authentication.startup.state | false | hadoop是否开启kerberos权限| | ||
|java.security.krb5.conf.path | /opt/krb5.conf | kerberos配置目录| | ||
|login.user.keytab.username | [email protected] | kerberos登录用户| | ||
|login.user.keytab.path | /opt/hdfs.headless.keytab | kerberos登录用户keytab| | ||
|kerberos.expire.time | 2 | kerberos过期时间,整数,单位为小时| | ||
|yarn.resourcemanager.ha.rm.ids | 192.168.xx.xx,192.168.xx.xx | yarn resourcemanager 地址, 如果resourcemanager开启了HA, 输入HA的IP地址(以逗号分隔),如果resourcemanager为单节点, 该值为空即可| | ||
|yarn.application.status.address | http://ds1:8088/ws/v1/cluster/apps/%s | 如果resourcemanager开启了HA或者没有使用resourcemanager,保持默认值即可. 如果resourcemanager为单节点,你需要将ds1 配置为resourcemanager对应的hostname| | ||
|development.state | false | 是否处于开发模式| | ||
|dolphin.scheduler.network.interface.preferred | NONE | 网卡名称| | ||
|dolphin.scheduler.network.priority.strategy | default | ip获取策略 default优先获取内网| | ||
|resource.manager.httpaddress.port | 8088 | resource manager的端口| | ||
|yarn.job.history.status.address | http://ds1:19888/ws/v1/history/mapreduce/jobs/%s | yarn的作业历史状态URL| | ||
|datasource.encryption.enable | false | 是否启用datasource 加密| | ||
|datasource.encryption.salt | !@#$%^&* | datasource加密使用的salt| | ||
|data-quality.jar.name | dolphinscheduler-data-quality-dev-SNAPSHOT.jar | 配置数据质量使用的jar包| | ||
|support.hive.oneSession | false | 设置hive SQL是否在同一个session中执行| | ||
|sudo.enable | true | 是否开启sudo| | ||
|alert.rpc.port | 50052 | Alert Server的RPC端口| | ||
|zeppelin.rest.url | http://localhost:8080 | zeppelin RESTful API 接口地址| | ||
|appId.collect | log | 收集applicationId方式, 如果用aop方法,将配置log替换为aop,并将`bin/env/dolphinscheduler_env.sh`自动收集applicationId相关环境变量配置的注释取消掉,注意:aop不支持远程主机提交yarn作业的方式比如Beeline客户端提交,且如果用户环境覆盖了dolphinscheduler_env.sh收集applicationId相关环境变量配置,aop方法会失效| | ||
| Parameter | Default Value | Description |
|-----------------------------------------------|--------------------------------------------------|-------------|
| data.basedir.path | /tmp/dolphinscheduler | Local working directory, used to store temporary files |
| resource.storage.type | NONE | Resource file storage type: HDFS, S3, OSS, GCS, ABS, NONE |
| resource.upload.path | /dolphinscheduler | Resource file storage path |
| aws.access.key.id | minioadmin | S3 access key |
| aws.secret.access.key | minioadmin | S3 secret access key |
| aws.region | us-east-1 | S3 region |
| aws.s3.endpoint | http://minio:9000 | S3 endpoint address |
| hdfs.root.user | hdfs | If the storage type is HDFS, a user with the corresponding operation permissions must be configured |
| fs.defaultFS | hdfs://mycluster:8020 | Request address. If resource.storage.type=S3, the value looks like s3a://dolphinscheduler. If resource.storage.type=HDFS and Hadoop is configured with HA, copy core-site.xml and hdfs-site.xml into the conf directory |
| hadoop.security.authentication.startup.state | false | Whether Kerberos authentication is enabled for Hadoop |
| java.security.krb5.conf.path | /opt/krb5.conf | Path of the Kerberos configuration file |
| login.user.keytab.username | [email protected] | Kerberos login user |
| login.user.keytab.path | /opt/hdfs.headless.keytab | Keytab of the Kerberos login user |
| kerberos.expire.time | 2 | Kerberos expiration time, an integer, in hours |
| yarn.resourcemanager.ha.rm.ids | 192.168.xx.xx,192.168.xx.xx | YARN ResourceManager addresses. If the ResourceManager has HA enabled, enter the HA IP addresses separated by commas; if the ResourceManager is a single node, leave this value empty |
| yarn.application.status.address | http://ds1:8088/ws/v1/cluster/apps/%s | If the ResourceManager has HA enabled or no ResourceManager is used, keep the default value; if the ResourceManager is a single node, set ds1 to the hostname of the ResourceManager |
| development.state | false | Whether development mode is enabled |
| dolphin.scheduler.network.interface.preferred | NONE | Name of the network interface that will be used |
| dolphin.scheduler.network.interface.restrict | NONE | Name of the network interface that must not be used |
| dolphin.scheduler.network.priority.strategy | default | IP selection strategy; default prefers the internal network address |
| resource.manager.httpaddress.port | 8088 | Port of the ResourceManager |
| yarn.job.history.status.address | http://ds1:19888/ws/v1/history/mapreduce/jobs/%s | YARN job history status URL |
| datasource.encryption.enable | false | Whether datasource encryption is enabled |
| datasource.encryption.salt | !@#$%^&* | Salt used for datasource encryption |
| data-quality.jar.name | dolphinscheduler-data-quality-dev-SNAPSHOT.jar | Jar used by the data quality module |
| support.hive.oneSession | false | Whether Hive SQL statements are executed in the same session |
| sudo.enable | true | Whether sudo is enabled |
| alert.rpc.port | 50052 | RPC port of the Alert Server |
| zeppelin.rest.url | http://localhost:8080 | Zeppelin RESTful API address |
| appId.collect | log | How the applicationId is collected. To use the aop method, replace log with aop and uncomment the applicationId-collection environment variables in `bin/env/dolphinscheduler_env.sh`. Note: aop does not support YARN jobs submitted from a remote host (for example via a Beeline client), and if the user environment overrides the applicationId-collection settings in dolphinscheduler_env.sh, the aop method will not take effect |
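
For reference, a minimal sketch of how the network-related entries above could look in common.properties. The keys and the `default` strategy come from the table; the interface names `eth0` and `docker0` are placeholder examples only, not defaults from this commit.

```properties
# Name of the network interface that will be used (NONE = no preference)
dolphin.scheduler.network.interface.preferred=eth0

# Name of the network interface that must not be used (NONE = no restriction)
dolphin.scheduler.network.interface.restrict=docker0

# IP selection strategy; "default" prefers the internal network address
dolphin.scheduler.network.priority.strategy=default
```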
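The new restrict option complements preferred: preferred whitelists interface names, restrict blacklists them. As a rough illustration only, and not DolphinScheduler's actual implementation (the class and method names below are invented), such a filter can be expressed over the JDK's java.net.NetworkInterface API:

```java
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;

public class NetworkInterfaceFilter {

    // Hypothetical helper: keep interfaces that are up, not loopback,
    // not listed in `restricted`, and, if `preferred` is non-empty,
    // explicitly listed in `preferred`.
    public static List<NetworkInterface> select(Set<String> preferred,
                                                Set<String> restricted) throws SocketException {
        List<NetworkInterface> result = new ArrayList<>();
        for (NetworkInterface nic : Collections.list(NetworkInterface.getNetworkInterfaces())) {
            if (restricted.contains(nic.getName())) {
                continue; // dolphin.scheduler.network.interface.restrict
            }
            if (!preferred.isEmpty() && !preferred.contains(nic.getName())) {
                continue; // dolphin.scheduler.network.interface.preferred
            }
            if (nic.isUp() && !nic.isLoopback()) {
                result.add(nic);
            }
        }
        return result;
    }

    public static void main(String[] args) throws SocketException {
        // Example: no preferred interface, but never use "docker0" (placeholder name).
        List<NetworkInterface> usable = select(Set.of(), Set.of("docker0"));
        usable.forEach(nic -> System.out.println(nic.getName()));
    }
}
```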
## Api-server related configuration