K8s master node: 10.10.10.12
K8s minion node: 10.10.10.14
OS: CentOS 7.4
Deployment reference document: k8s deployment manual (Youdao note)
Link: http://note.youdao.com/noteshare?id=cbaacc7678f024ed6670a5eb803da920&sub=90A090192E4D44F58C79E91A14D960ED
I. Master node installation and deployment steps:
1) Disable the firewall and enable the NTP service
systemctl stop firewalld      # stop the firewall
systemctl disable firewalld   # keep the firewall disabled across reboots
yum -y install ntp            # install the NTP service
ntpdate pool.ntp.org          # sync the clock once
systemctl start ntpd          # start NTP
setenforce 0                  # switch SELinux to permissive mode
systemctl enable ntpd         # NTP starts automatically after a reboot
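Optional sanity checks, assuming the commands above completed without errors:

systemctl is-active firewalld   # should print "inactive"
getenforce                      # should print "Permissive" after setenforce 0
ntpq -p                         # lists the peers ntpd is polling; a '*' marks the selected source

Note that setenforce 0 only disables SELinux enforcement until the next reboot; to make it permanent, also set SELINUX=disabled in /etc/selinux/config.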
2) Install the required components with yum. etcd can be deployed on a dedicated server or colocated with the master.
yum install epel-release -y   # install the EPEL repository
yum install kubernetes-master etcd flannel -y
3) Configure the relevant configuration files
1) The etcd configuration file looks like this:
Note: you can empty the existing configuration file and paste the content below as-is, but the IP addresses must be changed to match your environment.
[root@localhost ~]# more /etc/etcd/etcd.conf
# [member]
ETCD_NAME=etcd1
ETCD_DATA_DIR="/data/etcd"
#ETCD_WAL_DIR=""
#ETCD_SNAPSHOT_COUNT="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="http://10.10.10.12:2380"
ETCD_LISTEN_CLIENT_URLS="http://10.10.10.12:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.10.10.12:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd1=http://10.10.10.12:2380"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://10.10.10.12:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#
#[proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#
#[logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""
2) Create the etcd data directory and start etcd
mkdir -p /data/etcd/
chmod 757 -R /data/etcd/
systemctl restart etcd.service
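A quick way to confirm etcd came up healthy (etcdctl is installed along with the etcd package):

systemctl status etcd    # should show active (running)
etcdctl cluster-health   # should report the single member as healthy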
3) Check that the etcd process is running
[root@localhost ~]# ps -ef | grep etcd
kube   3161     1  0 16:54 ?      00:02:04 /usr/bin/kube-apiserver --logtostderr=true --v=0 --etcd-servers=http://10.10.10.12:2379 --insecure-bind-address=0.0.0.0 --port=8080 --kubelet-port=10250 --allow-privileged=false --service-cluster-ip-range=10.254.0.0/16 --admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota
etcd   4267     1  1 19:13 ?      00:01:08 /usr/bin/etcd --name=etcd1 --data-dir=/data/etcd --listen-client-urls=http://10.10.10.12:2379,http://127.0.0.1:2379
root   4299     1  0 19:13 ?      00:00:00 /usr/bin/flanneld -etcd-endpoints=http://10.10.10.12:2379 -etcd-prefix=/atomic.io/network
root   4799  4706  0 21:05 pts/0  00:00:00 grep --color=auto etcd
4) Configure the master node's main configuration file at /etc/kubernetes/config. Mind the IP addresses in it; you can empty the existing file and paste the content below.
[root@localhost ~]# more /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged Docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"   # privileged containers are off by default; set to true to allow them

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://10.10.10.12:8080"
5) Configure the apiserver component file; you can paste the content below as-is, minding the IP addresses.
[root@localhost ~]# more /etc/kubernetes/apiserver
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver

# The address on the local server to listen to.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"

# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://10.10.10.12:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"   # no need to change this

# default admission control policies
#KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"

# Add your own!
KUBE_API_ARGS=""
6) Start the related components
for I in etcd kube-apiserver kube-controller-manager kube-scheduler; do
    systemctl restart $I
    systemctl enable $I
    systemctl status $I
done
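Once the loop finishes, the apiserver can be probed directly on the insecure port configured above; kubectl is installed with the kubernetes-master package:

curl http://10.10.10.12:8080/healthz                       # should return "ok"
kubectl -s http://10.10.10.12:8080 get componentstatuses   # scheduler, controller-manager and etcd-0 should be Healthy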
7) Check the ports the components are listening on
netstat -anltp
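To filter just the ports that matter here (8080 apiserver, 2379/2380 etcd as configured above, and the default scheduler/controller-manager ports 10251/10252):

netstat -anltp | grep -E '8080|2379|2380|10251|10252'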
8) The master node configuration is now complete.
----------------------------------------------------------------------------------------------------------------------------
II. Minion node deployment steps
1) Disable the firewall and enable NTP (same as on the master)
systemctl stop firewalld      # stop the firewall
systemctl disable firewalld   # keep the firewall disabled across reboots
yum -y install ntp            # install the NTP service
ntpdate pool.ntp.org          # sync the clock once
systemctl start ntpd          # start NTP
setenforce 0                  # switch SELinux to permissive mode
systemctl enable ntpd         # NTP starts automatically after a reboot
2) Install the related components:
yum install epel-release -y
yum install kubernetes-node docker flannel *rhsm* -y
3) Edit the configuration file; note that the IP must point at the master node. You can copy the content as-is.
[root@localhost ~]# more /etc/kubernetes/config
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://10.10.10.12:8080"   # points at the master node
4) Edit the kubelet configuration file as shown below, minding the IP addresses
[root@localhost kubernetes]# more /etc/kubernetes/kubelet
###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=0.0.0.0"

# The port for the info server to serve on
KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=10.10.10.14"

# location of the api-server
KUBELET_API_SERVER="--api-servers=http://10.10.10.12:8080"

# pod infrastructure container
#KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=192.168.0.123:5000/centos68"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!
KUBELET_ARGS=""
5) Start the components
for I in kube-proxy kubelet docker; do
    systemctl restart $I
    systemctl enable $I
    systemctl status $I
done
6) Check the listening ports
netstat -anltp
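At this point the node should have registered with the master. Verify from the master (or any host that can reach the apiserver):

kubectl -s http://10.10.10.12:8080 get nodes   # 10.10.10.14 should appear with STATUS Ready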
---------------------------------------------------------------------------------------------------------------------------
III. Deploying the flanneld component
1) Edit the master node's configuration file at /etc/sysconfig/flanneld as follows
[root@localhost kubernetes]# more /etc/sysconfig/flanneld
# Flanneld configuration options

# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://10.10.10.12:2379"   # the etcd endpoint flannel talks to

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"   # the etcd key prefix flannel reads

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
2) Edit the minion node's configuration file at /etc/sysconfig/flanneld; note that the IP points at the master
[root@localhost kubernetes]# more /etc/sysconfig/flanneld
# Flanneld configuration options

# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://10.10.10.12:2379"   # the etcd endpoint flannel talks to

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"   # the etcd key prefix flannel reads

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
3) Run the commands below on the master node. They test that the etcd cluster is healthy and create the flannel network configuration in etcd. The last command creates the etcd key/value; without it, the flanneld component cannot start.
etcdctl member list
etcdctl cluster-health
etcdctl get /atomic.io/network/config        # inspect the current flannel config, if any
etcdctl ls /atomic.io/network/subnets        # list subnets already leased to nodes
etcdctl rm /atomic.io/network/ --recursive   # clear any old flannel configuration
etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'   # create the key flanneld requires
4) Start the component
service flanneld restart
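When flanneld starts, it leases a subnet from etcd and records it locally; on the CentOS packages, the docker service then reads this lease (via flannel's mk-docker-opts.sh helper) so that docker0 lands inside the leased range:

cat /run/flannel/subnet.env   # shows FLANNEL_NETWORK, FLANNEL_SUBNET and FLANNEL_MTU for this node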
5) Check the configuration
Master:
[root@localhost kubernetes]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.10.10.12  netmask 255.255.255.0  broadcast 10.10.10.255
        inet6 fe80::5236:328f:e632:35f3  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:c6:56:7d  txqueuelen 1000  (Ethernet)
        RX packets 82765  bytes 72168174 (68.8 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 39638  bytes 17434838 (16.6 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 172.17.63.0  netmask 255.255.0.0  destination 172.17.63.0
        inet6 fe80::cf87:5bc:e7e9:a6bd  prefixlen 64  scopeid 0x20<link>
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 786  bytes 928034 (906.2 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 561  bytes 50704 (49.5 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
Minion:
[root@localhost kubernetes]# ifconfig
docker0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1472
        inet 172.17.30.1  netmask 255.255.255.0  broadcast 0.0.0.0
        inet6 fe80::42:dbff:fe9a:b8bd  prefixlen 64  scopeid 0x20<link>
        ether 02:42:db:9a:b8:bd  txqueuelen 0  (Ethernet)
        RX packets 4164  bytes 1655177 (1.5 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 4392  bytes 467421 (456.4 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.10.10.14  netmask 255.255.255.0  broadcast 10.10.10.255
        inet6 fe80::a4d2:c607:2321:4812  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::5236:328f:e632:35f3  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:cb:19:44  txqueuelen 1000  (Ethernet)
        RX packets 654860  bytes 589926691 (562.5 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 283773  bytes 23145156 (22.0 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 172.17.30.0  netmask 255.255.0.0  destination 172.17.30.0
        inet6 fe80::5c5a:2bef:c481:fb02  prefixlen 64  scopeid 0x20<link>
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 558  bytes 50560 (49.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 789  bytes 928178 (906.4 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

Note that docker0 (172.17.30.1, /24) sits inside the subnet flannel leased for this node (flannel0 = 172.17.30.0), so containers started by Docker get addresses that are routable across the overlay.
6) Test by pinging the flannel0 interface IP of the other node; if the ping succeeds, the overlay network is working. If it fails, run the following commands on the master and minion respectively (a concrete ping example follows the commands):
Master:
service etcd restart
service flanneld restart
service docker restart
iptables -P FORWARD ACCEPT   # allow forwarded traffic; newer Docker releases set the FORWARD chain policy to DROP
Minion:
service etcd restart         # etcd is only installed on the master; this line can be skipped here
service flanneld restart
service docker restart
iptables -P FORWARD ACCEPT
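With the interface addresses shown above, a concrete cross-node check looks like this (substitute your own flannel0 addresses):

# from the master (flannel0 = 172.17.63.0), ping the minion's flannel0
ping -c 3 172.17.30.0
# from the minion, ping the master's flannel0
ping -c 3 172.17.63.0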
--------------------------------------------------------------------------------------------------------------------------
IV. Installing the web UI (dashboard) component
The following is done on the minion node.
The complete procedure for configuring the Kubernetes dashboard follows. First import the two images listed below:
pod-infrastructure
kubernetes-dashboard-amd64
The Docker image import commands are as follows:
docker load < pod-infrastructure.tgz
Then retag the imported image (run the tag command right after the load, while only one image shows as <none>):
docker tag $(docker images | grep none | awk '{print $3}') registry.access.redhat.com/rhel7/pod-infrastructure

docker load < kubernetes-dashboard-amd64.tgz
Then retag it the same way:
docker tag $(docker images | grep none | awk '{print $3}') bestwu/kubernetes-dashboard-amd64:v1.6.3
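Both images should now show up tagged:

docker images | grep -E 'pod-infrastructure|kubernetes-dashboard-amd64'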
---------------------------------------------------------
The following operations are all performed on the master.
On the master, create dashboard-controller.yaml with the following content:
[root@localhost k8s]# more dashboard-controller.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: kubernetes-dashboard
        image: bestwu/kubernetes-dashboard-amd64:v1.6.3
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 9090
        args:
        - --apiserver-host=http://10.10.10.12:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
Create dashboard-service.yaml with the following content:
[root@localhost k8s]# more dashboard-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090
Create the dashboard pods:
kubectl create -f dashboard-controller.yaml
kubectl create -f dashboard-service.yaml
After creation, check the details of the Pods and the Service. The last command force-deletes a stuck pod and can be skipped:
kubectl get namespace
kubectl get deployment --all-namespaces
kubectl get svc --all-namespaces
kubectl get pods --all-namespaces
kubectl get pod -o wide --all-namespaces
kubectl describe service/kubernetes-dashboard --namespace="kube-system"
# substitute the pod name reported by "kubectl get pods" in the two commands below
kubectl describe pod/kubernetes-dashboard-530803917-816df --namespace="kube-system"
kubectl delete pod/kubernetes-dashboard-530803917-816df --namespace="kube-system" --grace-period=0 --force
7) Test the dashboard in a browser:
http://10.10.10.12:8080/ui
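If the UI does not load, the same endpoint can be checked from the shell; on this apiserver version, /ui should redirect into the apiserver's service proxy for the dashboard:

curl -L http://10.10.10.12:8080/ui   # -L follows the redirect; should return the dashboard's HTML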