在开启Kerberos的情况下,默认两个CDH集群是无法互相访问的,要使两个CDH集群互相访问,可采取以下方式:
1. 相同kdc相同域
2. 不同kdc不同域,并配置跨域互信
注:不同kdc相同域,无法实现跨集群访问
一、相同kdc相同域
第一个CDH集群已经开启kerberos,kdc server hostname是master,域是HADOOP.NET。
第二个CDH集群使用以上的kdc和域,进行以下配置:
1.安装kdc client
# kdc client
yum -y install krb5-libs krb5-workstation
2.拷贝/etc/krb5.conf
3.配置kdc server的主机名映射
vim /etc/hosts
192.168.100.200 master
4.CDH集群启用Kerberos
相同kdc相同域的配置方法,和单个CDH集群开启kerberos一样,可参考《CDH6开启Kerberos验证》
5.验证跨集群访问
# 验证kerberos
[root@node1 cdhInstall]# klist
klist: No credentials cache found (filename: /tmp/krb5cc_0)
[root@node1 cdhInstall]# kinit cloudera-scm/admin
Password for cloudera-scm/admin@HADOOP.NET:
[root@node1 cdhInstall]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: cloudera-scm/admin@HADOOP.NET
Valid starting Expires Service principal
03/28/2022 16:25:36 03/29/2022 16:25:36 krbtgt/HADOOP.NET@HADOOP.NET
renew until 04/04/2022 16:25:36
# 访问自身集群hdfs
[root@node1 cdhInstall]# hdfs dfs -ls /tmp
Found 1 items
d--------- - hdfs supergroup 0 2022-03-28 16:25 /tmp/.cloudera_health_monitoring_canary_files
# 访问另一个集群hdfs
[root@node1 cdhInstall]# hdfs dfs -ls hdfs://master:8020/tmp
Found 1 items
d--------- - hdfs supergroup 0 2022-03-28 16:25 hdfs://master:8020/tmp/.cloudera_health_monitoring_canary_files
二、不同kdc不同域,并配置跨域互信
第一个CDH集群已经开启kerberos,kdc server hostname:master,域:HADOOP.NET。
第二个CDH集群使用kdc server hostname:node01,域:HADOOP2.NET。
1.配置KDC之间的信任主体(krbtgt)
注意:两个KDC上创建的同名krbtgt主体(krbtgt/HADOOP2.NET@HADOOP.NET 和 krbtgt/HADOOP.NET@HADOOP2.NET)的密码必须在两侧完全一致,否则跨域认证会失败。
# HADOOP.NET集群
kadmin.local -q "addprinc -pw admin krbtgt/HADOOP2.NET@HADOOP.NET"
kadmin.local -q "addprinc -pw admin krbtgt/HADOOP.NET@HADOOP2.NET"
# HADOOP2.NET集群
kadmin.local -q "addprinc -pw admin krbtgt/HADOOP2.NET@HADOOP.NET"
kadmin.local -q "addprinc -pw admin krbtgt/HADOOP.NET@HADOOP2.NET"
2.配置hdfs
HDFS->配置->搜索hadoop.security.auth_to_local,为两个域分别添加映射规则,使两个域的principal都能映射为本地用户,例如追加:RULE:[1:$1@$0](.*@HADOOP2\.NET)s/@.*// 和 RULE:[2:$1@$0](.*@HADOOP2\.NET)s/@.*//
HDFS->配置->搜索hdfs-site(hdfs-site.xml 的高级配置代码段),添加dfs.namenode.kerberos.principal.pattern,值设为"*",以允许客户端连接使用非本域principal的NameNode
重启服务
3.在krb5.conf中配置信任关系
# HADOOP.NET集群
[root@master ~]# cat /etc/krb5.conf
[libdefaults]
default_realm = HADOOP.NET
dns_lookup_kdc = false
dns_lookup_realm = false
ticket_lifetime = 86400
renew_lifetime = 604800
forwardable = true
default_tgs_enctypes = rc4-hmac
default_tkt_enctypes = rc4-hmac
permitted_enctypes = rc4-hmac
udp_preference_limit = 1
kdc_timeout = 3000
[realms]
HADOOP.NET = {
kdc = master
admin_server = master
}
HADOOP2.NET = {
kdc = node01
admin_server = node01
}
[capaths]
HADOOP.NET = {
HADOOP2.NET = .
}
[domain_realm]
.hadoop.net = HADOOP.NET
hadoop.net = HADOOP.NET
.hadoop2.net = HADOOP2.NET
hadoop2.net = HADOOP2.NET
master = HADOOP.NET
node01 = HADOOP2.NET
# HADOOP2.NET集群
[root@node01 ~]# cat /etc/krb5.conf
[libdefaults]
default_realm = HADOOP2.NET
dns_lookup_kdc = false
dns_lookup_realm = false
ticket_lifetime = 86400
renew_lifetime = 604800
forwardable = true
default_tgs_enctypes = rc4-hmac
default_tkt_enctypes = rc4-hmac
permitted_enctypes = rc4-hmac
udp_preference_limit = 1
kdc_timeout = 3000
[realms]
HADOOP2.NET = {
kdc = node01
admin_server = node01
}
HADOOP.NET = {
kdc = master
admin_server = master
}
[capaths]
HADOOP2.NET = {
HADOOP.NET = .
}
[domain_realm]
.hadoop.net = HADOOP.NET
hadoop.net = HADOOP.NET
.hadoop2.net = HADOOP2.NET
hadoop2.net = HADOOP2.NET
master = HADOOP.NET
node01 = HADOOP2.NET
4.重启kdc
systemctl restart krb5kdc
5.验证跨集群访问
# HADOOP.NET集群
[root@master ~]# kinit cloudera-scm/admin
Password for cloudera-scm/admin@HADOOP.NET:
[root@master ~]# hdfs dfs -ls /tmp
Found 1 items
d--------- - hdfs supergroup 0 2022-03-28 19:32 /tmp/.cloudera_health_monitoring_canary_files
[root@master ~]# hdfs dfs -ls hdfs://node01:8020/tmp
Found 1 items
d--------- - hdfs supergroup 0 2022-03-28 19:32 hdfs://node01:8020/tmp/.cloudera_health_monitoring_canary_files
# HADOOP2.NET集群
[root@node01 ~]# kinit cloudera-scm/admin
Password for cloudera-scm/admin@HADOOP2.NET:
[root@node01 ~]# hdfs dfs -ls /tmp
Found 1 items
d--------- - hdfs supergroup 0 2022-03-28 19:31 /tmp/.cloudera_health_monitoring_canary_files
[root@node01 ~]# hdfs dfs -ls hdfs://master:8020/tmp
Found 1 items
d--------- - hdfs supergroup 0 2022-03-28 19:31 hdfs://master:8020/tmp/.cloudera_health_monitoring_canary_files