
k8s kubernetes installation

 

-1. Preparation: the hadoop servers

 

10.156.50.35 yanfabu2-35.base.app.dev.yf zk1  hadoop1 master1 master sparkmaster  k8s-master
10.156.50.36 yanfabu2-36.base.app.dev.yf zk2  hadoop2 master2        sparkwork1   k8s-node1
10.156.50.37 yanfabu2-37.base.app.dev.yf zk3  hadoop3 slaver1        sparkwork2   k8s-node2

 

 

0. Disable selinux and firewalld

 

systemctl status firewalld
systemctl stop firewalld

service iptables status  
service iptables stop 

iptables -P FORWARD ACCEPT
 
vim /etc/sysctl.conf  
        vm.swappiness = 0 

setenforce 0  
 
 vi /etc/selinux/config  
        SELINUX=disabled 

sudo scp /etc/sysctl.conf root@10.156.50.36:/etc/
sudo scp /etc/sysctl.conf root@10.156.50.37:/etc/
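
To apply the sysctl change without a reboot and confirm selinux is really off, a quick check on each machine (getenforce prints Permissive after setenforce 0, and Disabled only after a reboot):

sysctl -p
sysctl vm.swappiness
getenforce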

5. Install k8s

 yum install -y kubernetes etcd

[root@yanfabu2-35 zkkafka]# yum install -y kubernetes etcd
 
6. Configure the master

 

6.1 Configure and start etcd

vim /etc/etcd/etcd.conf

ETCD_DATA_DIR="/var/lib/etcd/master.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="master"


systemctl enable etcd
systemctl start etcd 

lsof -i:2379

[root@yanfabu2-35 etcd]# lsof -i:2379
COMMAND   PID USER   FD   TYPE   DEVICE SIZE/OFF NODE NAME
etcd    29520 etcd    6u  IPv6 12019691      0t0  TCP *:2379 (LISTEN)
etcd    29520 etcd   10u  IPv4 12019694      0t0  TCP localhost:57236->localhost:2379 (ESTABLISHED)
etcd    29520 etcd   12u  IPv6 12019696      0t0  TCP localhost:2379->localhost:57236 (ESTABLISHED)
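
Optionally, the member can also be checked through the etcd v2 HTTP API configured above; both of these ship with the etcd package:

curl http://10.156.50.35:2379/version
etcdctl cluster-health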

6.2 Configure and start the apiserver

vim /etc/kubernetes/apiserver

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://10.156.50.35:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""


systemctl enable kube-apiserver.service 
systemctl start kube-apiserver.service
lsof -i:8080

[root@yanfabu2-35 etcd]# vim /etc/kubernetes/apiserver
[root@yanfabu2-35 etcd]# systemctl enable kube-apiserver.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
[root@yanfabu2-35 etcd]# systemctl start kube-apiserver.service
[root@yanfabu2-35 etcd]#  lsof -i:8080
COMMAND     PID USER   FD   TYPE   DEVICE SIZE/OFF NODE NAME
kube-apis 29583 kube   43u  IPv6 12031006      0t0  TCP *:webcache (LISTEN)
kube-apis 29583 kube   44u  IPv4 12031008      0t0  TCP localhost:37048->localhost:webcache (ESTABLISHED)
kube-apis 29583 kube   46u  IPv6 12028585      0t0  TCP localhost:webcache->localhost:37048 (ESTABLISHED)
kube-apis 29583 kube   47u  IPv4 12028589      0t0  TCP localhost:37052->localhost:webcache (ESTABLISHED)
kube-apis 29583 kube   48u  IPv6 12028590      0t0  TCP localhost:webcache->localhost:37052 (ESTABLISHED)
kube-apis 29583 kube   55u  IPv4 12028603      0t0  TCP localhost:37066->localhost:webcache (ESTABLISHED)
kube-apis 29583 kube   56u  IPv6 12028604      0t0  TCP localhost:webcache->localhost:37066 (ESTABLISHED)
kube-apis 29583 kube   57u  IPv4 12031017      0t0  TCP localhost:37068->localhost:webcache (ESTABLISHED)
kube-apis 29583 kube   58u  IPv6 12031018      0t0  TCP localhost:webcache->localhost:37068 (ESTABLISHED)
kube-apis 29583 kube   59u  IPv4 12031020      0t0  TCP localhost:37070->localhost:webcache (ESTABLISHED)
kube-apis 29583 kube   60u  IPv6 12028606      0t0  TCP localhost:webcache->localhost:37070 (ESTABLISHED)
kube-apis 29583 kube   61u  IPv4 12031021      0t0  TCP localhost:37072->localhost:webcache (ESTABLISHED)
kube-apis 29583 kube   62u  IPv6 12031022      0t0  TCP localhost:webcache->localhost:37072 (ESTABLISHED)
kube-apis 29583 kube   63u  IPv4 12028608      0t0  TCP localhost:37074->localhost:webcache (ESTABLISHED)
kube-apis 29583 kube   64u  IPv6 12028609      0t0  TCP localhost:webcache->localhost:37074 (ESTABLISHED)

http://10.156.50.35:8080/

{
  "paths": [
    "/api",
    "/api/v1",
    "/apis",
    "/apis/apps",
    "/apis/apps/v1beta1",
    "/apis/authentication.k8s.io",
    "/apis/authentication.k8s.io/v1beta1",
    "/apis/authorization.k8s.io",
    "/apis/authorization.k8s.io/v1beta1",
    "/apis/autoscaling",
    "/apis/autoscaling/v1",
    "/apis/batch",
    "/apis/batch/v1",
    "/apis/batch/v2alpha1",
    "/apis/certificates.k8s.io",
    "/apis/certificates.k8s.io/v1alpha1",
    "/apis/extensions",
    "/apis/extensions/v1beta1",
    "/apis/policy",
    "/apis/policy/v1beta1",
    "/apis/rbac.authorization.k8s.io",
    "/apis/rbac.authorization.k8s.io/v1alpha1",
    "/apis/storage.k8s.io",
    "/apis/storage.k8s.io/v1beta1",
    "/healthz",
    "/healthz/ping",
    "/healthz/poststarthook/bootstrap-controller",
    "/healthz/poststarthook/extensions/third-party-resources",
    "/healthz/poststarthook/rbac/bootstrap-roles",
    "/logs",
    "/metrics",
    "/swaggerapi/",
    "/ui/",
    "/version"
  ]
}
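
Since /healthz appears in the path list above, the apiserver also has a one-line liveness check:

curl http://10.156.50.35:8080/healthz        # expected output: ok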

6.3 Configure and start kube-controller-manager

vim /etc/kubernetes/config


KUBE_MASTER="--master=http://10.156.50.35:8080"

systemctl enable kube-controller-manager.service 
systemctl start kube-controller-manager.service

[root@yanfabu2-35 etcd]# systemctl enable kube-controller-manager.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@yanfabu2-35 etcd]# systemctl start kube-controller-manager.service

6.4 Configure and start kube-scheduler

systemctl enable kube-scheduler.service 
systemctl start kube-scheduler.service 

[root@yanfabu2-35 etcd]# systemctl enable kube-scheduler.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@yanfabu2-35 etcd]# systemctl start kube-scheduler.service

6.5 Verify the nodes

kubectl get nodes

[root@yanfabu2-35 etcd]# kubectl get nodes
No resources found.

systemctl restart etcd kube-apiserver kube-controller-manager kube-scheduler    # on the master
systemctl restart kubelet kube-proxy                                            # on each node
 
7. Configure the nodes

7.1 Install docker

yum install docker -y
systemctl enable docker
systemctl start docker

[root@yanfabu2-36 zkkafka]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@yanfabu2-36 zkkafka]# systemctl start docker

7.2 Install, configure, and start the kubernetes kubelet and kube-proxy

7.2.1 k8s-node1

yum install kubernetes -y
systemctl enable kubelet.service 
systemctl enable kube-proxy.service 

vim /etc/kubernetes/kubelet

KUBELET_ADDRESS="--address=10.156.50.36"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=k8s-node1"
KUBELET_API_SERVER="--api-servers=http://10.156.50.35:8080"

systemctl start kubelet
systemctl start kube-proxy

7.2.2 k8s-node2

yum install kubernetes -y
systemctl enable kubelet.service 
systemctl enable kube-proxy.service 

vim /etc/kubernetes/kubelet

KUBELET_ADDRESS="--address=10.156.50.37"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=k8s-node2"
KUBELET_API_SERVER="--api-servers=http://10.156.50.35:8080"

systemctl start kubelet
systemctl start kube-proxy
systemctl restart kubelet kube-proxy

systemctl restart etcd kube-apiserver kube-controller-manager kube-scheduler    # on the master
 
[root@yanfabu2-35 etcd]# kubectl get nodes
NAME        STATUS     AGE
k8s-node1   NotReady   9s
[root@yanfabu2-35 etcd]# kubectl get nodes
NAME        STATUS    AGE
k8s-node1   Ready     32s
k8s-node2   Ready     10s



[root@yanfabu2-35 etcd]# etcdctl ls /registry
/registry/serviceaccounts
/registry/minions
/registry/clusterroles
/registry/ranges
/registry/namespaces
/registry/clusterrolebindings
/registry/services
/registry/events

8. Install docker

8.1 Install the docker registry

docker pull registry 
mkdir -p /opt/data/registry
sudo docker run -d -p 5000:5000 -v /opt/data/registry:/var/lib/registry   --name  registry registry

sudo vim /etc/default/docker 
DOCKER_OPTS="--registry-mirror=http://hub-mirror.c.163.com --insecure-registry 10.156.50.35:5000" 

systemctl restart docker
docker restart registry

docker tag redis 10.156.50.35:5000/redis
docker push 10.156.50.35:5000/redis
docker images
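
To confirm the push actually landed, the registry's standard v2 HTTP API can be queried:

curl http://10.156.50.35:5000/v2/_catalog
curl http://10.156.50.35:5000/v2/redis/tags/list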

8.2 Install docker on the master

yum install -y docker-distribution.x86_64
systemctl enable docker-distribution.service	
systemctl start docker-distribution.service
yum install -y docker

[root@bogon etc]# docker push 192.168.206.243:5000/busybox 
The push refers to a repository [192.168.206.243:5000/busybox]
Get https://192.168.206.243:5000/v1/_ping: http: server gave HTTP response to HTTPS client

vim /etc/docker/daemon.json


{
   "insecure-registries":["10.156.50.35:5000"]
}


systemctl enable docker
systemctl start docker

8.3 Install docker on the nodes

yum install -y docker-distribution.x86_64
systemctl enable docker-distribution.service	
systemctl start docker-distribution.service
yum install -y docker

vim /etc/docker/daemon.json


{
   "insecure-registries":["10.156.50.35:5000"]
}


systemctl enable docker
systemctl start docker

9. Install the overlay network

9.1 Install flannel

flannel: tunnel-based overlay, lower performance

calico: routed layer-3 IP addresses, higher performance

On all three machines:  yum install -y flannel

[root@yanfabu2-37 zkkafka]#  yum install -y flannel

9.2 Set the network config key /atomic.io/network/config

etcdctl set /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
etcdctl get /atomic.io/network/config
curl http://10.156.50.35:2379/v2/keys/atomic.io/network/config -XPUT -d value='{"Network":"172.17.0.0/16"}'
etcdctl get /atomic.io/network/config

/atomic.io/network/config is the etcd key that flanneld reads to obtain its subnet.
The key path can be chosen freely; once a host has been assigned a subnet, flannel adjusts docker's startup parameters to match.

[root@yanfabu2-35 zkkafka]# etcdctl set /atomic.io/network/config '{"Network":"10.10.0.0/16"}'
{"Network":"10.10.0.0/16"}
[root@yanfabu2-35 zkkafka]# etcdctl get /atomic.io/network/config
{"Network":"10.10.0.0/16"}
[root@yanfabu2-35 zkkafka]# curl http://10.156.50.35:2379/v2/keys/atomic.io/network/config -XPUT -d value='{"Network":"172.17.0.0/16"}'
{"action":"set","node":{"key":"/atomic.io/network/config","value":"{\"Network\":\"172.17.0.0/16\"}","modifiedIndex":337327,"createdIndex":337327},"prevNode":{"key":"/atomic.io/network/config","value":"{\"Network\":\"10.10.0.0/16\"}","modifiedIndex":337309,"createdIndex":337309}}
[root@yanfabu2-35 zkkafka]# etcdctl get /atomic.io/network/config
{"Network":"172.17.0.0/16"}

9.3 Configure and start flannel (edit the config file and start the service on all three servers)

vim /etc/sysconfig/flanneld

FLANNEL_ETCD_ENDPOINTS="http://10.156.50.35:2379" 
FLANNEL_ETCD_PREFIX="/atomic.io/network" 

systemctl stop docker
systemctl enable flanneld.service 
systemctl start flanneld.service

systemctl start docker

ip add show flannel0
ip add show docker0

 
[root@yanfabu2-35 zkkafka]# ip add show flannel0
5: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none 
    inet 172.17.102.0/16 scope global flannel0
       valid_lft forever preferred_lft forever
    inet6 fe80::b727:219b:ba22:621b/64 scope link flags 800 
       valid_lft forever preferred_lft forever
[root@yanfabu2-35 zkkafka]# ip add show docker0
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:63:71:53:cd brd ff:ff:ff:ff:ff:ff
    inet 172.17.102.1/24 scope global docker0
       valid_lft forever preferred_lft forever

[root@yanfabu2-36 zkkafka]# ip add show flannel0
4: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none 
    inet 172.17.87.0/16 scope global flannel0
       valid_lft forever preferred_lft forever
    inet6 fe80::7580:44e9:9d54:3248/64 scope link flags 800 
       valid_lft forever preferred_lft forever
[root@yanfabu2-36 zkkafka]# ip add show docker0
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:3d:f7:11:07 brd ff:ff:ff:ff:ff:ff
    inet 172.17.87.1/24 scope global docker0
       valid_lft forever preferred_lft forever



[root@yanfabu2-37 zkkafka]# ip add show flannel0
4: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none 
    inet 172.17.89.0/16 scope global flannel0
       valid_lft forever preferred_lft forever
    inet6 fe80::acce:ffa6:a0cd:8b6d/64 scope link flags 800 
       valid_lft forever preferred_lft forever
[root@yanfabu2-37 zkkafka]# ip add show docker0
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:95:1a:4d:54 brd ff:ff:ff:ff:ff:ff
    inet 172.17.89.1/24 scope global docker0
       valid_lft forever preferred_lft forever
[root@yanfabu2-37 zkkafka]# 

[zkkafka@yanfabu2-35 ~]$ route
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
default         gateway         0.0.0.0         UG    100    0        0 ens192
10.156.50.0     0.0.0.0         255.255.255.0   U     100    0        0 ens192
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 flannel0
172.17.102.0    0.0.0.0         255.255.255.0   U     0      0        0 docker0



[root@yanfabu2-36 zkkafka]# route
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
default         gateway         0.0.0.0         UG    100    0        0 ens192
10.156.50.0     0.0.0.0         255.255.255.0   U     100    0        0 ens192
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 flannel0
172.17.87.0     0.0.0.0         255.255.255.0   U     0      0        0 docker0


[root@yanfabu2-37 zkkafka]# route
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
default         gateway         0.0.0.0         UG    100    0        0 ens192
10.156.50.0     0.0.0.0         255.255.255.0   U     100    0        0 ens192
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 flannel0
172.17.89.0     0.0.0.0         255.255.255.0   U     0      0        0 docker0

10. Running pods

10.0 Modify /etc/sysctl.d/k8s.conf

vim /etc/sysctl.d/k8s.conf

net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

scp /etc/sysctl.d/k8s.conf  root@10.156.50.36:/etc/sysctl.d/
scp /etc/sysctl.d/k8s.conf  root@10.156.50.37:/etc/sysctl.d/
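
These two keys only exist once the bridge netfilter module is loaded, so load it and re-read the sysctl files on every machine (br_netfilter is the stock CentOS 7 module name):

modprobe br_netfilter
sysctl --system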

10.1 Write redis-pod.yaml

mkdir yaml
cd yaml
vim redis-pod.yaml

apiVersion: v1                    # kubernetes API version, currently v1
kind: Pod                         # API object type, a pod here
metadata:                         # pod metadata
  name: redis                     # pod name, must be unique within its namespace
  labels:
    app: redis
spec:                             # the pod's specification
  containers:                     # container specs, an array; each entry defines one container
    - name: redis                 # container name
      image: 10.156.50.35:5000/redis      # container image

kubectl create -f redis-pod.yaml

[root@yanfabu2-35 yaml]# kubectl create -f redis-pod.yaml
Error from server (ServerTimeout): error when creating "redis-pod.yaml": No API token found for service account "default", retry after the token is automatically created and added to the service account

openssl genrsa -out /etc/kubernetes/serviceaccount.key 2048

vim /etc/kubernetes/apiserver
KUBE_API_ARGS="--service_account_key_file=/etc/kubernetes/serviceaccount.key"

vim /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS="--service_account_private_key_file=/etc/kubernetes/serviceaccount.key"

systemctl restart etcd kube-apiserver kube-controller-manager kube-scheduler
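
After the restart, the default service account should receive its token; worth confirming before retrying the create (the token name is generated, so yours will differ):

kubectl get serviceaccount default -o yaml        # should now list a secrets: entry
kubectl get secrets                               # expect a default-token-xxxxx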

10.2 Start the redis pod

kubectl create -f redis-pod.yaml
kubectl describe pod redis
kubectl get pods -o wide
kubectl get pods redis -o yaml
kubectl exec -ti redis /bin/bash 
kubectl delete pod redis
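
For a quick functional check once the pod is Running, the exec form above can run redis-cli directly (the stock redis image ships it):

kubectl exec -ti redis -- redis-cli ping          # expected output: PONG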

[root@yanfabu2-35 yaml]# kubectl create -f redis-pod.yaml
pod "redis" created
[root@yanfabu2-35 yaml]# kubectl get pods
NAME      READY     STATUS              RESTARTS   AGE
redis     0/1       ContainerCreating   0          7s
 
[root@yanfabu2-35 ~]# kubectl describe pod redis
Name:		redis
Namespace:	default
Node:		k8s-node2/10.156.50.37
Start Time:	Wed, 03 Jul 2019 16:03:27 +0800
Labels:		app=redis
Status:		Pending
IP:		
Controllers:	<none>
Containers:
  redis:
    Container ID:	
    Image:		10.156.50.35:5000/redis
    Image ID:		
    Port:		
    State:		Waiting
      Reason:		ContainerCreating
    Ready:		False
    Restart Count:	0
    Volume Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-8jc9w (ro)
    Environment Variables:	<none>
Conditions:
  Type		Status
  Initialized 	True 
  Ready 	False 
  PodScheduled 	True 
Volumes:
  default-token-8jc9w:
    Type:	Secret (a volume populated by a Secret)
    SecretName:	default-token-8jc9w
QoS Class:	BestEffort
Tolerations:	<none>
Events:
  FirstSeen	LastSeen	Count	From			SubObjectPath	Type		Reason		Message
  ---------	--------	-----	----			-------------	--------	------		-------
  1m		1m		1	{default-scheduler }			Normal		Scheduled	Successfully assigned redis to k8s-node1
  1m		19s		4	{kubelet k8s-node1}			Warning		FailedSync	Error syncing pod, skipping: failed to "StartContainer" for "POD" with ErrImagePull: "image pull failed for registry.access.redhat.com/rhel7/pod-infrastructure:latest, this may be because there are no credentials on this request.  details: (open /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt: no such file or directory)"

  1m	7s	4	{kubelet k8s-node1}		Warning	FailedSync	Error syncing pod, skipping: failed to "StartContainer" for "POD" with ImagePullBackOff: "Back-off pulling image \"registry.access.redhat.com/rhel7/pod-infrastructure:latest\""

[root@yanfabu2-35 ~]#  kubectl get pods -o wide
NAME      READY     STATUS              RESTARTS   AGE       IP        NODE
redis     0/1       ContainerCreating   0          27m       <none>    k8s-node2

10.3 Troubleshooting: the redis pod fails to start

[root@yanfabu2-35 yaml]# kubectl get pods
NAME      READY     STATUS              RESTARTS   AGE
redis     0/1       ContainerCreating   0          7s

kubectl describe pod redis

Events:
  FirstSeen	LastSeen	Count	From			SubObjectPath	Type		Reason		Message
  ---------	--------	-----	----			-------------	--------	------		-------
  1m		1m		1	{default-scheduler }			Normal		Scheduled	Successfully assigned redis to k8s-node1
  1m		19s		4	{kubelet k8s-node1}			Warning		FailedSync	Error syncing pod, skipping: failed to "StartContainer" for "POD" with ErrImagePull: "image pull failed for registry.access.redhat.com/rhel7/pod-infrastructure:latest, this may be because there are no credentials on this request.  details: (open /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt: no such file or directory)"

  1m	7s	4	{kubelet k8s-node1}		Warning	FailedSync	Error syncing pod, skipping: failed to "StartContainer" for "POD" with ImagePullBackOff: "Back-off pulling image \"registry.access.redhat.com/rhel7/pod-infrastructure:latest\""

[root@yanfabu2-35 yaml]# cd /etc/rhsm/ca/redhat-uep.pem
-bash: cd: /etc/rhsm/ca/redhat-uep.pem: No such file or directory
[root@yanfabu2-35 yaml]# yum provides */redhat-uep.pem
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
elastic-6.x/filelists_db                                                                                                                                | 8.9 MB  00:00:00     
python-rhsm-certificates-1.19.10-1.el7_4.x86_64 : Certificates required to communicate with a Red Hat Unified Entitlement Platform
Repo        : base
Matched from:
Filename    : /etc/rhsm/ca/redhat-uep.pem

Install the python-rhsm-certificates package:
yum install python-rhsm-certificates -y
yum install yum-utils -y
yumdownloader python-rhsm-certificates
rpm -e subscription-manager-rhsm-certificates
rpm -ivh python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm

10.4 Troubleshooting: pulling registry.access.redhat.com/rhel7/pod-infrastructure:latest fails

Tolerations:	<none>
Events:
  FirstSeen	LastSeen	Count	From			SubObjectPath	Type		Reason		Message
  ---------	--------	-----	----			-------------	--------	------		-------
  2m		2m		1	{default-scheduler }			Normal		Scheduled	Successfully assigned redis to k8s-node1
  2m		19s		9	{kubelet k8s-node1}			Warning		FailedSync	Error syncing pod, skipping: failed to "StartContainer" for "POD" with ImagePullBackOff: "Back-off pulling image \"registry.access.redhat.com/rhel7/pod-infrastructure:latest\""

  2m	5s	5	{kubelet k8s-node1}		Warning	FailedSync	Error syncing pod, skipping: failed to "StartContainer" for "POD" with ErrImagePull: "image pull failed for registry.access.redhat.com/rhel7/pod-infrastructure:latest, this may be because there are no credentials on this request.  details: (open /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt: no such file or directory)"

yum install -y *rhsm*
wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | tee /etc/rhsm/ca/redhat-uep.pem
 


 ls -l /etc/rhsm/ca/ 

On a machine with internet access, docker pull registry.access.redhat.com/rhel7/pod-infrastructure, then transfer the image to each node (docker save on the connected machine, docker load on the node)

[root@yanfabu2-35 yaml]#  kubectl get pods
NAME      READY     STATUS    RESTARTS   AGE
redis     1/1       Running   0          8s

[root@yanfabu2-35 ~]#  kubectl get pods -o wide
NAME      READY     STATUS    RESTARTS   AGE       IP            NODE
redis     1/1       Running   0          5m        172.17.89.2   k8s-node2

[root@yanfabu2-35 ~]# kubectl get pods redis -o yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: 2019-07-10T09:16:12Z
  labels:
    app: redis
  name: redis
  namespace: default
  resourceVersion: "1203314"
  selfLink: /api/v1/namespaces/default/pods/redis
  uid: 5c429a31-a2f3-11e9-9ed9-005056bb8b05
spec:
  containers:
  - image: 10.156.50.35:5000/redis
    imagePullPolicy: Always
    name: redis
    resources: {}
    terminationMessagePath: /dev/termination-log
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-8jc9w
      readOnly: true
  dnsPolicy: ClusterFirst
  nodeName: k8s-node2
  restartPolicy: Always
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  volumes:
  - name: default-token-8jc9w
    secret:
      defaultMode: 420
      secretName: default-token-8jc9w
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: 2019-07-10T09:16:17Z
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: 2019-07-10T09:16:19Z
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: 2019-07-10T09:16:12Z
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://15a83967ab49a00295ece474c89da7406e62cee14ded8f61b6509bfcc3cb0252
    image: 10.156.50.35:5000/redis
    imageID: docker-pullable://10.156.50.35:5000/redis@sha256:efa9bdc679a6f4f8aee5a1ddc909e98d1257d47ba9326787669b82668b3dc100
    lastState: {}
    name: redis
    ready: true
    restartCount: 0
    state:
      running:
        startedAt: 2019-07-10T09:16:18Z
  hostIP: 10.156.50.37
  phase: Running
  podIP: 172.17.89.2
  startTime: 2019-07-10T09:16:17Z
[root@yanfabu2-35 ~]# 

[root@yanfabu2-35 ~]# kubectl logs redis
1:C 10 Jul 09:16:18.694 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
1:C 10 Jul 09:16:18.695 # Redis version=4.0.2, bits=64, commit=00000000, modified=0, pid=1, just started
1:C 10 Jul 09:16:18.695 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
1:M 10 Jul 09:16:18.698 * Running mode=standalone, port=6379.
1:M 10 Jul 09:16:18.698 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
1:M 10 Jul 09:16:18.699 # Server initialized
1:M 10 Jul 09:16:18.699 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.
1:M 10 Jul 09:16:18.699 * Ready to accept connections

11. Using k8s

vim springbootdemo-rc.yaml 

apiVersion: v1
kind: ReplicationController
metadata:
  name: springbootdemo
  labels: 
    name: springbootdemo
spec: 
  replicas: 2
  selector: 
    name: springbootdemo
  template:
    metadata:
      labels: 
        name: springbootdemo
    spec: 
      containers:
      - name: springbootdemo
        image: springbootdemo:lasted
        ports: 
        - containerPort: 7071
          protocol: TCP
        imagePullPolicy: IfNotPresent
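
A ReplicationController keeps the replica count honest (the transcript below shows a deleted pod being replaced within seconds), and it can also be resized in place; the name here follows the yaml above, so substitute whatever kubectl get rc reports:

kubectl scale rc springbootdemo --replicas=3
kubectl get rc springbootdemo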

vim springbootdemo-svc.yaml 


apiVersion: v1
kind: Service
metadata:
  name: springbootdemo
  labels:
    name: springbootdemo
spec:
  type: NodePort
  ports:
  - port: 30000
    targetPort: 7071
    protocol: TCP
    nodePort: 30000
  selector:
    name: springbootdemo

port: 30000        # the service's cluster-internal port

targetPort: 7071   # the pod's container port

nodePort: 30000    # the port exposed on every node
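
With type: NodePort the service answers on every node's address at the nodePort; the demo app's /hello endpoint (visible in its logs below) makes a convenient smoke test, with the node IP taken from the host list in section -1:

curl http://10.156.50.36:30000/hello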

kubectl create -f  springbootdemo-svc.yaml 
kubectl create -f  springbootdemo-rc.yaml 

[root@yanfabu2-35 yaml]# kubectl get node
NAME        STATUS    AGE
k8s-node1   Ready     24d
k8s-node2   Ready     24d
[root@yanfabu2-35 yaml]# kubectl get nodes
NAME        STATUS    AGE
k8s-node1   Ready     24d
k8s-node2   Ready     24d
[root@yanfabu2-35 yaml]# kubectl get pod
NAME                      READY     STATUS    RESTARTS   AGE
redis                     1/1       Running   0          12d
springbootdemo-rc-f3mmk   1/1       Running   0          7m
springbootdemo-rc-lrjxp   1/1       Running   0          7m
[root@yanfabu2-35 yaml]# kubectl get rc
NAME                DESIRED   CURRENT   READY     AGE
springbootdemo-rc   2         2         2         7m
[root@yanfabu2-35 yaml]# kubectl get svc
NAME                 CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
kubernetes           10.254.0.1       <none>        443/TCP          24d
springbootdemo-svc   10.254.170.110   <nodes>       7071:30000/TCP   17h
[root@yanfabu2-35 yaml]# kubectl delete pod springbootdemo-rc-lrjxp 
pod "springbootdemo-rc-lrjxp" deleted
[root@yanfabu2-35 yaml]# kubectl get pod
NAME                      READY     STATUS        RESTARTS   AGE
redis                     1/1       Running       0          12d
springbootdemo-rc-f0x8c   1/1       Running       0          4s
springbootdemo-rc-f3mmk   1/1       Running       0          7m
springbootdemo-rc-lrjxp   1/1       Terminating   0          7m
[root@yanfabu2-35 yaml]# kubectl get pod
NAME                      READY     STATUS        RESTARTS   AGE
redis                     1/1       Running       0          12d
springbootdemo-rc-f0x8c   1/1       Running       0          15s
springbootdemo-rc-f3mmk   1/1       Running       0          8m
springbootdemo-rc-lrjxp   1/1       Terminating   0          8m
[root@yanfabu2-35 yaml]# kubectl delete pods springbootdemo-rc-lrjxp 
Error from server (NotFound): pods "springbootdemo-rc-lrjxp" not found
[root@yanfabu2-35 yaml]# kubectl delete pod springbootdemo-rc-lrjxp 
Error from server (NotFound): pods "springbootdemo-rc-lrjxp" not found
[root@yanfabu2-35 yaml]# kubectl get pod
NAME                      READY     STATUS    RESTARTS   AGE
redis                     1/1       Running   0          12d
springbootdemo-rc-f0x8c   1/1       Running   0          46s
springbootdemo-rc-f3mmk   1/1       Running   0          8m

[root@yanfabu2-35 yaml]# kubectl describe pod springbootdemo-rc-f3mmk
Name:		springbootdemo-rc-f3mmk
Namespace:	default
Node:		k8s-node1/10.156.50.36
Start Time:	Tue, 23 Jul 2019 11:39:10 +0800
Labels:		name=springbootdemo
Status:		Running
IP:		172.17.87.3
Controllers:	ReplicationController/springbootdemo-rc
Containers:
  springbootdemo:
    Container ID:	docker://8e51329039bf1940c71eb59fc893f250d7bacb0e7d8c7b148d3fd22939eeb75f
    Image:		springbootdemo:lasted
    Image ID:		docker://sha256:3bf1f3faec9bd38b6a8cc59dd23c0746bb4b65d05103a2395a7223527ab17306
    Port:		7071/TCP
    State:		Running
      Started:		Tue, 23 Jul 2019 11:39:10 +0800
    Ready:		True
    Restart Count:	0
    Volume Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-8jc9w (ro)
    Environment Variables:	<none>
Conditions:
  Type		Status
  Initialized 	True 
  Ready 	True 
  PodScheduled 	True 
Volumes:
  default-token-8jc9w:
    Type:	Secret (a volume populated by a Secret)
    SecretName:	default-token-8jc9w
QoS Class:	BestEffort
Tolerations:	<none>
Events:
  FirstSeen	LastSeen	Count	From			SubObjectPath			Type		Reason			Message
  ---------	--------	-----	----			-------------			--------	------			-------
  9m		9m		1	{default-scheduler }					Normal		Scheduled		Successfully assigned springbootdemo-rc-f3mmk to k8s-node1
  9m		9m		2	{kubelet k8s-node1}					Warning		MissingClusterDNS	kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to DNSDefault policy.
  9m		9m		1	{kubelet k8s-node1}	spec.containers{springbootdemo}	Normal		Pulled			Container image "springbootdemo:lasted" already present on machine
  9m		9m		1	{kubelet k8s-node1}	spec.containers{springbootdemo}	Normal		Created			Created container with docker id 8e51329039bf; Security:[seccomp=unconfined]
  9m		9m		1	{kubelet k8s-node1}	spec.containers{springbootdemo}	Normal		Started			Started container with docker id 8e51329039bf

12. Install skydns

jasonwangshuming/busybox                                latest              c527b6e6531d        18 months ago       2.43MB
jasonwangshuming/kube2sky-amd64                         latest              efc75cc4310f        18 months ago       29.2MB
jasonwangshuming/skydns                                 latest              4e96d800261d        18 months ago       40.5MB
jasonwangshuming/exechealthz                            latest              3b614debf4d0        18 months ago       7.12MB
jasonwangshuming/etcd-amd64       



docker save -o  etcd-amd64.tar jasonwangshuming/etcd-amd64 
docker save -o  kube2sky-amd64.tar jasonwangshuming/kube2sky-amd64  
docker save -o  skydns.tar jasonwangshuming/skydns  
docker save -o  exechealthz.tar jasonwangshuming/exechealthz
docker save -o  busybox.tar jasonwangshuming/busybox

docker load -i etcd-amd64.tar 
docker load -i kube2sky-amd64.tar
docker load -i skydns.tar
docker load -i exechealthz.tar
docker load -i busybox.tar
 

docker  tag jasonwangshuming/etcd-amd64  10.156.50.35:5000/etcd-amd64  
docker  tag jasonwangshuming/kube2sky-amd64 10.156.50.35:5000/kube2sky-amd64   
docker  tag jasonwangshuming/skydns 10.156.50.35:5000/skydns  
docker  tag jasonwangshuming/exechealthz  10.156.50.35:5000/exechealthz
docker  tag jasonwangshuming/busybox  10.156.50.35:5000/busybox


docker push  10.156.50.35:5000/etcd-amd64 
docker push  10.156.50.35:5000/kube2sky-amd64 
docker push  10.156.50.35:5000/skydns 
docker push  10.156.50.35:5000/exechealthz
docker push  10.156.50.35:5000/busybox

docker pull  10.156.50.35:5000/etcd-amd64 
docker pull  10.156.50.35:5000/kube2sky-amd64 
docker pull  10.156.50.35:5000/skydns 
docker pull  10.156.50.35:5000/exechealthz
docker pull  10.156.50.35:5000/busybox

apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns
  labels:
    app: kube-dns
spec:
  replicas: 1
  selector:
    app: kube-dns
  template:
    metadata:
      labels:
        app: kube-dns
    spec:
      containers:
      - name: etcd
        image: 10.156.50.35:5000/etcd-amd64
        resources:
          limits:
            cpu: 100m
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 50Mi
        command:
        - /usr/local/bin/etcd
        - -data-dir
        - /var/etcd/data
        - -listen-client-urls
        - http://0.0.0.0:2379,http://0.0.0.0:4001
        - -advertise-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -initial-cluster-token
        - skydns-etcd
      - name: kube2sky
        image: 10.156.50.35:5000/kube2sky-amd64
        args:
        - --domain=cluster.local
        - --kube-master-url=http://10.156.50.35:8080
        resources:
          limits:
            cpu: 100m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 50Mi
      - name: skydns
        image: 10.156.50.35:5000/skydns
        resources:
          limits:
            cpu: 100m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 50Mi
        args:
        - -machines=http://127.0.0.1:2379
        - -addr=0.0.0.0:53
        - -ns-rotate=false
        - -domain=cluster.local.
        - -nameservers=10.156.89.3:53,10.156.89.4:53
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
      - name: healthz
        image: 10.156.50.35:5000/exechealthz
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.cluster.local localhost >/dev/null
        - -port=8080
        ports:
        - containerPort: 8080
          protocol: TCP
      volumes:
      - name: etcd-storage
        emptyDir: {}
      dnsPolicy: Default

apiVersion: v1
kind: Service
metadata:                                                                             
  name: kube-dns
  labels:
    app: kube-dns
spec:
  selector:
    app: kube-dns
  clusterIP:  10.254.254.254
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

 
apiVersion: v1
kind: Pod
metadata:
  labels:
    name: busybox
  name: busybox
spec:
  containers:
    - name: busybox
      image: 10.156.50.35:5000/busybox 
      command:
      - sleep
      - "360000"

[root@yanfabu2-35 yaml]# kubectl describe pod kube-dns-0
Name:		kube-dns-0r0cg
Namespace:	default
Node:		k8s-node2/10.156.50.37
Start Time:	Wed, 24 Jul 2019 15:27:28 +0800
Labels:		app=kube-dns
Status:		Running
IP:		172.17.89.2
Controllers:	ReplicationController/kube-dns
Containers:
  etcd:
    Container ID:	docker://15b8700db0229a089fa536bd7036f11f7a9aed7f6aff1f360a0d7c92dc73eb35
    Image:		10.156.50.35:5000/etcd-amd64
    Image ID:		docker-pullable://10.156.50.35:5000/etcd-amd64@sha256:60a9af2ecc64391ff207b41d26a9ad3c2c41648ece157609a0088a0f9905810b
    Port:		
    Command:
      /usr/local/bin/etcd
      -data-dir
      /var/etcd/data
      -listen-client-urls
      http://127.0.0.1:2379,http://127.0.0.1:4001
      -advertise-client-urls
      http://127.0.0.1:2379,http://127.0.0.1:4001
      -initial-cluster-token
      skydns-etcd
    Limits:
      cpu:	100m
      memory:	500Mi
    Requests:
      cpu:		100m
      memory:		50Mi
    State:		Running
      Started:		Wed, 24 Jul 2019 15:27:30 +0800
    Ready:		True
    Restart Count:	0
    Volume Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-8jc9w (ro)
    Environment Variables:	<none>
  kube2sky:
    Container ID:	docker://6617ecebd1233204b5b1bcb26a7bad6e6ca6d15399749a5dd44fbb640b01556a
    Image:		10.156.50.35:5000/kube2sky-amd64
    Image ID:		docker-pullable://10.156.50.35:5000/kube2sky-amd64@sha256:dbe1492b21a14fdfc5e9dabf813c96319a64f8856c69d77bcf41f262bda4aa69
    Port:		
    Args:
      --domain=cluster.local
      --kube-master-url=http://10.156.50.35:8080
    Limits:
      cpu:	100m
      memory:	200Mi
    Requests:
      cpu:		100m
      memory:		50Mi
    State:		Running
      Started:		Wed, 24 Jul 2019 15:27:31 +0800
    Ready:		True
    Restart Count:	0
    Volume Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-8jc9w (ro)
    Environment Variables:	<none>
  skydns:
    Container ID:	docker://a6d6de9e87cf9b3774d58733eb1619258fcbf89e3b4060103c7d66c0881c7e39
    Image:		10.156.50.35:5000/skydns
    Image ID:		docker-pullable://10.156.50.35:5000/skydns@sha256:104e55075c00d8dbfd74d08b25174df84eff96dc1476d856ea4de2f60be7313a
    Ports:		53/UDP, 53/TCP
    Args:
      -machines=http://127.0.0.1:4001
      -addr=0.0.0.0:53
      -ns-rotate=false
      -domain=cluster.local.
    Limits:
      cpu:	100m
      memory:	200Mi
    Requests:
      cpu:		100m
      memory:		50Mi
    State:		Running
      Started:		Wed, 24 Jul 2019 15:27:31 +0800
    Ready:		True
    Restart Count:	0
    Liveness:		http-get http://:8080/healthz delay=60s timeout=5s period=10s #success=1 #failure=5
    Readiness:		http-get http://:8080/readiness delay=30s timeout=5s period=10s #success=1 #failure=3
    Volume Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-8jc9w (ro)
    Environment Variables:	<none>
  healthz:
    Container ID:	docker://76bed055175fee5c85f0cdb11eeeed17d37e1edd72b8d43cfd1e3505d3eb3864
    Image:		10.156.50.35:5000/exechealthz
    Image ID:		docker-pullable://10.156.50.35:5000/exechealthz@sha256:61d1d0b35f1e954574a5be8138301251e9b394cf1514d00ef12db0395bb28e13
    Port:		8080/TCP
    Args:
      -cmd=nslookup kubernetes.default.svc.cluster.local localhost >/dev/null
      -port=8080
    Limits:
      cpu:	10m
      memory:	20Mi
    Requests:
      cpu:		10m
      memory:		20Mi
    State:		Running
      Started:		Wed, 24 Jul 2019 15:27:30 +0800
    Ready:		True
    Restart Count:	0
    Volume Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-8jc9w (ro)
    Environment Variables:	<none>
Conditions:
  Type		Status
  Initialized 	True 
  Ready 	True 
  PodScheduled 	True 
Volumes:
  etcd-storage:
    Type:	EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:	
  default-token-8jc9w:
    Type:	Secret (a volume populated by a Secret)
    SecretName:	default-token-8jc9w
QoS Class:	Burstable
Tolerations:	<none>
Events:
  FirstSeen	LastSeen	Count	From			SubObjectPath			Type		Reason		Message
  ---------	--------	-----	----			-------------			--------	------		-------
  4m		4m		1	{default-scheduler }					Normal		Scheduled	Successfully assigned kube-dns-0r0cg to k8s-node2
  4m		4m		1	{kubelet k8s-node2}	spec.containers{healthz}	Normal		Pulling		pulling image "10.156.50.35:5000/exechealthz"
  4m		4m		1	{kubelet k8s-node2}	spec.containers{healthz}	Normal		Created		Created container with docker id 76bed055175f; Security:[seccomp=unconfined]
  4m		4m		1	{kubelet k8s-node2}	spec.containers{healthz}	Normal		Pulled		Successfully pulled image "10.156.50.35:5000/exechealthz"
  4m		4m		1	{kubelet k8s-node2}	spec.containers{kube2sky}	Normal		Pulled		Successfully pulled image "10.156.50.35:5000/kube2sky-amd64"
  4m		4m		1	{kubelet k8s-node2}	spec.containers{healthz}	Normal		Started		Started container with docker id 76bed055175f
  4m		4m		1	{kubelet k8s-node2}	spec.containers{etcd}		Normal		Pulling		pulling image "10.156.50.35:5000/etcd-amd64"
  4m		4m		1	{kubelet k8s-node2}	spec.containers{etcd}		Normal		Pulled		Successfully pulled image "10.156.50.35:5000/etcd-amd64"
  4m		4m		1	{kubelet k8s-node2}	spec.containers{etcd}		Normal		Created		Created container with docker id 15b8700db022; Security:[seccomp=unconfined]
  4m		4m		1	{kubelet k8s-node2}	spec.containers{kube2sky}	Normal		Pulling		pulling image "10.156.50.35:5000/kube2sky-amd64"
  4m		4m		1	{kubelet k8s-node2}	spec.containers{etcd}		Normal		Started		Started container with docker id 15b8700db022
  4m		4m		1	{kubelet k8s-node2}	spec.containers{kube2sky}	Normal		Created		Created container with docker id 6617ecebd123; Security:[seccomp=unconfined]
  4m		4m		1	{kubelet k8s-node2}	spec.containers{kube2sky}	Normal		Started		Started container with docker id 6617ecebd123
  4m		4m		1	{kubelet k8s-node2}	spec.containers{skydns}		Normal		Pulling		pulling image "10.156.50.35:5000/skydns"
  4m		4m		1	{kubelet k8s-node2}	spec.containers{skydns}		Normal		Pulled		Successfully pulled image "10.156.50.35:5000/skydns"
  4m		4m		1	{kubelet k8s-node2}	spec.containers{skydns}		Normal		Created		Created container with docker id a6d6de9e87cf; Security:[seccomp=unconfined]
  4m		4m		1	{kubelet k8s-node2}	spec.containers{skydns}		Normal		Started		Started container with docker id a6d6de9e87cf


[root@yanfabu2-35 yaml]# kubectl get svc -l app=kube-dns
NAME       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
kube-dns   10.254.254.254   <none>        53/UDP,53/TCP   6m

Modify the kubelet startup arguments:

--cluster-dns=10.254.254.254 --cluster-domain=cluster.local
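
On this rpm-based install the flags go into KUBELET_ARGS in /etc/kubernetes/kubelet on each node (a sketch matching the file layout used in section 7.2; restart kubelet afterwards as below):

KUBELET_ARGS="--cluster-dns=10.254.254.254 --cluster-domain=cluster.local"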

systemctl restart kubelet kube-proxy

kubectl delete pods   kube-dns-0r0cg  --grace-period=0 --force

[root@yanfabu2-35 yaml]# kubectl get pods
NAME                      READY     STATUS    RESTARTS   AGE
kube-dns-0r0cg            4/4       Unknown   0          1h
kube-dns-bg8xd            4/4       Running   0          36m
redis                     1/1       Running   0          13d
springbootdemo-rc-gdtgt   1/1       Running   0          5m
springbootdemo-rc-jc3jv   1/1       Running   0          5m
[root@yanfabu2-35 yaml]# kubectl logs springbootdemo-rc-gdtgt
  .   ____          _            __ _ _
 /\\ / ___'_ __ _ _(_)_ __  __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
 \\/  ___)| |_)| | | | | || (_| |  ) ) ) )
  '  |____| .__|_| |_|_| |_\__, | / / / /
 =========|_|==============|___/=/_/_/_/
 :: Spring Boot ::        (v1.3.5.RELEASE)
2019-07-24 08:38:04.080  INFO 5 --- [           main] s.d.j.SpringJenkinsDockerApplication     : Starting SpringJenkinsDockerApplication v0.0.1-SNAPSHOT on springbootdemo-rc-gdtgt with PID 5 (/app.jar started by root in /)
2019-07-24 08:38:04.085  INFO 5 --- [           main] s.d.j.SpringJenkinsDockerApplication     : No active profile set, falling back to default profiles: default
2019-07-24 08:38:04.422  INFO 5 --- [           main] ationConfigEmbeddedWebApplicationContext : Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@14b38b37: startup date [Wed Jul 24 08:38:04 GMT 2019]; root of context hierarchy
2019-07-24 08:38:10.861  INFO 5 --- [           main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat initialized with port(s): 7071 (http)
2019-07-24 08:38:10.935  INFO 5 --- [           main] o.apache.catalina.core.StandardService   : Starting service Tomcat
2019-07-24 08:38:10.938  INFO 5 --- [           main] org.apache.catalina.core.StandardEngine  : Starting Servlet Engine: Apache Tomcat/8.0.33
2019-07-24 08:38:13.906  INFO 5 --- [ost-startStop-1] org.apache.jasper.servlet.TldScanner     : At least one JAR was scanned for TLDs yet contained no TLDs. Enable debug logging for this logger for a complete list of JARs that were scanned but no TLDs were found in them. Skipping unneeded JARs during scanning can improve startup time and JSP compilation time.
2019-07-24 08:38:14.664  INFO 5 --- [ost-startStop-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring embedded WebApplicationContext
2019-07-24 08:38:14.665  INFO 5 --- [ost-startStop-1] o.s.web.context.ContextLoader            : Root WebApplicationContext: initialization completed in 10272 ms
2019-07-24 08:38:15.898  INFO 5 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean        : Mapping servlet: 'dispatcherServlet' to [/]
2019-07-24 08:38:15.904  INFO 5 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean  : Mapping filter: 'characterEncodingFilter' to: [/*]
2019-07-24 08:38:15.905  INFO 5 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean  : Mapping filter: 'hiddenHttpMethodFilter' to: [/*]
2019-07-24 08:38:15.905  INFO 5 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean  : Mapping filter: 'httpPutFormContentFilter' to: [/*]
2019-07-24 08:38:15.905  INFO 5 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean  : Mapping filter: 'requestContextFilter' to: [/*]
2019-07-24 08:38:16.904  INFO 5 --- [           main] s.w.s.m.m.a.RequestMappingHandlerAdapter : Looking for @ControllerAdvice: org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@14b38b37: startup date [Wed Jul 24 08:38:04 GMT 2019]; root of context hierarchy
2019-07-24 08:38:17.303  INFO 5 --- [           main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/hello]}" onto public java.lang.String showandshare.docker.jenkinsdockerdemo.controller.HelloController.hello()
2019-07-24 08:38:17.316  INFO 5 --- [           main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/hello/test]}" onto public java.util.Map<java.lang.String, java.lang.String> showandshare.docker.jenkinsdockerdemo.controller.HelloController.getInfo(java.lang.String,java.lang.Integer)
2019-07-24 08:38:17.317  INFO 5 --- [           main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/hello/info]}" onto public java.util.Map<java.lang.String, java.lang.String> showandshare.docker.jenkinsdockerdemo.controller.HelloController.getInfo(java.lang.String)
2019-07-24 08:38:17.349  INFO 5 --- [           main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/error],produces=[text/html]}" onto public org.springframework.web.servlet.ModelAndView org.springframework.boot.autoconfigure.web.BasicErrorController.errorHtml(javax.servlet.http.HttpServletRequest,javax.servlet.http.HttpServletResponse)
2019-07-24 08:38:17.359  INFO 5 --- [           main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/error]}" onto public org.springframework.http.ResponseEntity<java.util.Map<java.lang.String, java.lang.Object>> org.springframework.boot.autoconfigure.web.BasicErrorController.error(javax.servlet.http.HttpServletRequest)
2019-07-24 08:38:18.754  INFO 5 --- [           main] o.s.w.s.handler.SimpleUrlHandlerMapping  : Mapped URL path [/webjars/**] onto handler of type [class org.springframework.web.servlet.resource.ResourceHttpRequestHandler]
2019-07-24 08:38:18.755  INFO 5 --- [           main] o.s.w.s.handler.SimpleUrlHandlerMapping  : Mapped URL path [/**] onto handler of type [class org.springframework.web.servlet.resource.ResourceHttpRequestHandler]
2019-07-24 08:38:19.020  INFO 5 --- [           main] o.s.w.s.handler.SimpleUrlHandlerMapping  : Mapped URL path [/**/favicon.ico] onto handler of type [class org.springframework.web.servlet.resource.ResourceHttpRequestHandler]
2019-07-24 08:38:19.643  INFO 5 --- [           main] o.s.j.e.a.AnnotationMBeanExporter        : Registering beans for JMX exposure on startup
2019-07-24 08:38:19.777  INFO 5 --- [           main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat started on port(s): 7071 (http)
2019-07-24 08:38:19.786  INFO 5 --- [           main] s.d.j.SpringJenkinsDockerApplication     : Started SpringJenkinsDockerApplication in 17.09 seconds (JVM running for 18.507)


[root@yanfabu2-35 yaml]# kubectl exec -ti springbootdemo-b9wv5 wget localhost:7071/hello
Connecting to localhost:7071 (127.0.0.1:7071)
hello                100% |**********************************************************************************************************************************************|    17   0:00:00 ETA
[root@yanfabu2-35 yaml]# ls

Troubleshooting:

[root@yanfabu2-35 yaml]# kubectl logs  kube-dns-x2g2g skydns
2019/07/26 05:50:48 skydns: falling back to default configuration, could not read from etcd: 100: Key not found (/skydns) [1]
2019/07/26 05:50:48 skydns: ready for queries on cluster.local. for tcp://0.0.0.0:53 [rcache 0]
2019/07/26 05:50:48 skydns: ready for queries on cluster.local. for udp://0.0.0.0:53 [rcache 0]
 
- -nameservers=10.156.89.3:53,10.156.89.4:53   # add the test environment's upstream DNS resolvers

[root@yanfabu2-35 yaml]# kubectl exec -it busybox nslookup yanfabu2-35.base.app.dev.yf
Server:    10.254.254.254
Address 1: 10.254.254.254

nslookup: can't resolve 'yanfabu2-35.base.app.dev.yf'

 kubectl logs  kube-dns-q4nxp   skydns
2019/07/26 07:06:34 skydns: falling back to default configuration, could not read from etcd: 501: All the given peers are not reachable (failed to propose on members [http://127.0.0.1:4001] twice [last error: Get http://127.0.0.1:4001/v2/keys/skydns/config?quorum=false&recursive=false&sorted=false: dial tcp 127.0.0.1:4001: connection refused]) [0]
2019/07/26 07:06:34 skydns: ready for queries on cluster.local. for tcp://0.0.0.0:53 [rcache 0]
2019/07/26 07:06:34 skydns: ready for queries on cluster.local. for udp://0.0.0.0:53 [rcache 0]
[root@yanfabu2-35 yaml]# kubectl exec -it busybox nslookup kubernetes


      - name: skydns
        image: 10.156.50.35:5000/skydns
        resources:
          limits:
            cpu: 100m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 50Mi
        args:
        - -machines=http://127.0.0.1:2379  # changed to 2379
        - -addr=0.0.0.0:53
        - -ns-rotate=false
        - -domain=cluster.local.
        - -nameservers=10.156.89.3:53,10.156.89.4:53

[root@yanfabu2-35 yaml]# kubectl exec -it busybox nslookup springbootdemo
Server:    10.254.254.254
Address 1: 10.254.254.254

Name:      springbootdemo
Address 1: 10.254.196.233


# nslookup resolves, but ping fails (cluster IPs are virtual, implemented by iptables, and do not answer ICMP)

/ # nslookup springbootdemo
Server:    10.254.254.254
Address 1: 10.254.254.254

Name:      springbootdemo
Address 1: 10.254.196.233
/ # ping springbootdemo
PING springbootdemo (10.254.196.233): 56 data bytes
/ # curl http://springbootdemo:30008/hello
sh: curl: not found
/ # wget http://springbootdemo:30008/hello
Connecting to springbootdemo:30008 (10.254.196.233:30008)
wget: can't connect to remote host (10.254.196.233): Connection timed out

num  target     prot opt source               destination         
1    REJECT     tcp  --  anywhere             10.254.59.83         /* kubernetes-dashboard/dashboard-metrics-scraper: has no endpoints */ tcp dpt:irdmi reject-with icmp-port-unreachable
2    REJECT     tcp  --  anywhere             10.254.121.193       /* default/springbootdemo: has no endpoints */ tcp dpt:30008 reject-with icmp-port-unreachable
3    REJECT     tcp  --  anywhere             10.254.1.134         /* default/mysql-service: has no endpoints */ tcp dpt:mysql reject-with icmp-port-unreachable
4    REJECT     tcp  --  anywhere             10.254.147.165       /* kubernetes-dashboard/kubernetes-dashboard: has no endpoints */ tcp dpt:https reject-with icmp-port-unreachable


Chain KUBE-FIREWALL (2 references)
num  target     prot opt source               destination         
1    DROP       all  --  anywhere             anywhere             /* kubernetes firewall for dropping marked packets */ mark match 0x8000/0x8000

Chain KUBE-SERVICES (1 references)
num  target     prot opt source               destination         
1    REJECT     tcp  --  anywhere             10.254.1.134         /* default/mysql-service: has no endpoints */ tcp dpt:mysql reject-with icmp-port-unreachable
2    REJECT     tcp  --  anywhere             10.254.121.193       /* default/springbootdemo: has no endpoints */ tcp dpt:30008 reject-with icmp-port-unreachable
3    REJECT     tcp  --  anywhere             10.254.147.165       /* kubernetes-dashboard/kubernetes-dashboard: has no endpoints */ tcp dpt:https reject-with icmp-port-unreachable
4    REJECT     tcp  --  anywhere             10.254.59.83         /* kubernetes-dashboard/dashboard-metrics-scraper: has no endpoints */ tcp dpt:irdmi reject-with icmp-port-unreachable


Change app in the selector to name to keep it consistent (the service selector must match the pod labels)

[root@yanfabu2-35 yaml]# cat springbootdemo-rc.yaml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: springbootdemo
  labels: 
    name: springbootdemo
spec: 
  replicas: 2
  selector: 
    name: springbootdemo
  template:
    metadata:
      labels: 
        name: springbootdemo
    spec: 
      containers:
      - name: springbootdemo
        image: springbootdemo:lasted
        ports: 
        - containerPort: 7071
          protocol: TCP
        imagePullPolicy: IfNotPresent

[root@yanfabu2-35 yaml]# cat springbootdemo-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: springbootdemo
  labels:
    name: springbootdemo
spec:
  type: NodePort
  ports:
  - port: 30008
    targetPort: 7071
    protocol: TCP
    nodePort: 30008
  selector:
    name: springbootdemo

vim defaultbackend.yaml 

 

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    k8s-app: default-http-backend
  namespace: default
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        image: 192.168.206.243:5000/defaultbackend 
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: default
  labels:
    k8s-app: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: default-http-backend

vim  nginx-ingress-controller.yaml

 

apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-ingress-lb
  labels:
    name: nginx-ingress-lb
  namespace: default
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: nginx-ingress-lb
      annotations:
        prometheus.io/port: '10254'
        prometheus.io/scrape: 'true'
    spec:
      terminationGracePeriodSeconds: 60
      hostNetwork: true
      containers:
      - image: 192.168.206.243:5000/nginx-ingress-controller
        name: nginx-ingress-lb
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 1
        ports:
        - containerPort: 80
          hostPort: 80
        - containerPort: 443
          hostPort: 443
        env:
          - name: POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: POD_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
          - name: KUBERNETES_MASTER
            value: http://192.168.206.243:8080
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
        - --apiserver-host=http://192.168.206.243:8080

springbootdemo-ingress.yaml

 

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: springbootdemo-ingress
  namespace: default
spec:
  rules:
  - host: springbootdemo
    http:
      paths:
      - path: /
        backend:
          serviceName: springbootdemo
          servicePort: 30000
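
Because the rule matches on the host springbootdemo, requests must carry that Host header; nginx-ingress-lb runs with hostNetwork: true on port 80, so any node running the controller can be targeted (the IP below is one of this cluster's nodes, used only as an example):

curl -H "Host: springbootdemo" http://10.156.50.36/hello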

kubectl get ingress -o wide

kubectl exec -it nginx-ingress-controller-5b79cbb5c6-2zr7f  sh

 

cat /etc/nginx/nginx.conf

# Configuration checksum: 10355849410420420293

# setup custom paths that do not require root access
pid /tmp/nginx.pid;

load_module /etc/nginx/modules/ngx_http_modsecurity_module.so;

daemon off;

worker_processes 1;

worker_rlimit_nofile 1047552;

worker_shutdown_timeout 10s ;

events {
	multi_accept        on;
	worker_connections  16384;
	use                 epoll;
}

http {
	lua_package_cpath "/usr/local/lib/lua/?.so;/usr/lib/lua-platform-path/lua/5.1/?.so;;";
	lua_package_path "/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/?.lua;/usr/local/lib/lua/?.lua;;";
	
	lua_shared_dict configuration_data 5M;
	lua_shared_dict certificate_data 16M;
	lua_shared_dict locks 512k;
	lua_shared_dict sticky_sessions 1M;
	
	init_by_lua_block {
		require("resty.core")
		collectgarbage("collect")
		
		local lua_resty_waf = require("resty.waf")
		lua_resty_waf.init()
		
		-- init modules
		local ok, res
		
		ok, res = pcall(require, "configuration")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		configuration = res
	configuration.nameservers = { "192.168.206.2" }
		end
		
		ok, res = pcall(require, "balancer")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		balancer = res
		end
		
		ok, res = pcall(require, "monitor")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		monitor = res
		end
		
	}
	
	init_worker_by_lua_block {
		balancer.init_worker()
		monitor.init_worker()
	}
	
	real_ip_header      X-Forwarded-For;
	
	real_ip_recursive   on;
	
	set_real_ip_from    0.0.0.0/0;
	
	geoip_country       /etc/nginx/geoip/GeoIP.dat;
	geoip_city          /etc/nginx/geoip/GeoLiteCity.dat;
	geoip_org           /etc/nginx/geoip/GeoIPASNum.dat;
	geoip_proxy_recursive on;
	
	aio                 threads;
	aio_write           on;
	
	tcp_nopush          on;
	tcp_nodelay         on;
	
	log_subrequest      on;
	
	reset_timedout_connection on;
	
	keepalive_timeout  75s;
	keepalive_requests 100;
	
	client_body_temp_path           /tmp/client-body;
	fastcgi_temp_path               /tmp/fastcgi-temp;
	proxy_temp_path                 /tmp/proxy-temp;
	ajp_temp_path                   /tmp/ajp-temp;
	
	client_header_buffer_size       1k;
	client_header_timeout           60s;
	large_client_header_buffers     4 8k;
	client_body_buffer_size         8k;
	client_body_timeout             60s;
	
	http2_max_field_size            4k;
	http2_max_header_size           16k;
	http2_max_requests              1000;
	
	types_hash_max_size             2048;
	server_names_hash_max_size      1024;
	server_names_hash_bucket_size   32;
	map_hash_bucket_size            64;
	
	proxy_headers_hash_max_size     512;
	proxy_headers_hash_bucket_size  64;
	
	variables_hash_bucket_size      128;
	variables_hash_max_size         2048;
	
	underscores_in_headers          off;
	ignore_invalid_headers          on;
	
	limit_req_status                503;
	
	include /etc/nginx/mime.types;
	default_type text/html;
	
	gzip on;
	gzip_comp_level 5;
	gzip_http_version 1.1;
	gzip_min_length 256;
	gzip_types application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component;
	gzip_proxied any;
	gzip_vary on;
	
	# Custom headers for response
	
	server_tokens on;
	
	# disable warnings
	uninitialized_variable_warn off;
	
	# Additional available variables:
	# $namespace
	# $ingress_name
	# $service_name
	# $service_port
	log_format upstreaminfo '$the_real_ip - [$the_real_ip] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status $req_id';
	
	map $request_uri $loggable {
		
		default 1;
	}
	
	access_log /var/log/nginx/access.log upstreaminfo if=$loggable;
	
	error_log  /var/log/nginx/error.log notice;
	
	resolver 192.168.206.2 valid=30s;
	
	# See https://www.nginx.com/blog/websocket-nginx
	map $http_upgrade $connection_upgrade {
		default          upgrade;
		
		# See http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive
		''               '';
		
	}
	
	# The following is a sneaky way to do "set $the_real_ip $remote_addr"
	# Needed because using set is not allowed outside server blocks.
	map '' $the_real_ip {
		
		default          $remote_addr;
		
	}
	
	# trust http_x_forwarded_proto headers correctly indicate ssl offloading
	map $http_x_forwarded_proto $pass_access_scheme {
		default          $http_x_forwarded_proto;
		''               $scheme;
	}
	
	map $http_x_forwarded_port $pass_server_port {
		default           $http_x_forwarded_port;
		''                $server_port;
	}
	
	# Obtain best http host
	map $http_host $this_host {
		default          $http_host;
		''               $host;
	}
	
	map $http_x_forwarded_host $best_http_host {
		default          $http_x_forwarded_host;
		''               $this_host;
	}
	
	# validate $pass_access_scheme and $scheme are http to force a redirect
	map "$scheme:$pass_access_scheme" $redirect_to_https {
		default          0;
		"http:http"      1;
		"https:http"     1;
	}
	
	map $pass_server_port $pass_port {
		443              443;
		default          $pass_server_port;
	}
	
	# Reverse proxies can detect if a client provides a X-Request-ID header, and pass it on to the backend server.
	# If no such header is provided, it can provide a random value.
	map $http_x_request_id $req_id {
		default   $http_x_request_id;
		
		""        $request_id;
		
	}
	
	# Create a variable that contains the literal $ character.
	# This works because the geo module will not resolve variables.
	geo $literal_dollar {
		default "$";
	}
	
	server_name_in_redirect off;
	port_in_redirect        off;
	
	ssl_protocols TLSv1.2;
	
	# turn on session caching to drastically improve performance
	
	ssl_session_cache builtin:1000 shared:SSL:10m;
	ssl_session_timeout 10m;
	
	# allow configuring ssl session tickets
	ssl_session_tickets on;
	
	# slightly reduce the time-to-first-byte
	ssl_buffer_size 4k;
	
	# allow configuring custom ssl ciphers
	ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256';
	ssl_prefer_server_ciphers on;
	
	ssl_ecdh_curve auto;
	
	proxy_ssl_session_reuse on;
	
	upstream upstream_balancer {
		server 0.0.0.1; # placeholder
		
		balancer_by_lua_block {
			balancer.balance()
		}
		
		keepalive 32;
		
		keepalive_timeout  60s;
		keepalive_requests 100;
		
	}
	
	# Global filters
	
	## start server _
	server {
		server_name _ ;
		
		listen 80 default_server reuseport backlog=511;
		
		listen [::]:80 default_server reuseport backlog=511;
		
		set $proxy_upstream_name "-";
		
		listen 443  default_server reuseport backlog=511 ssl http2;
		
		listen [::]:443  default_server reuseport backlog=511 ssl http2;
		
		# PEM sha: 7e07077985f304dc830f1cd7d4d741fffaf6fef7
		ssl_certificate                         /etc/ingress-controller/ssl/default-fake-certificate.pem;
		ssl_certificate_key                     /etc/ingress-controller/ssl/default-fake-certificate.pem;
		
		location / {
			
			set $namespace      "";
			set $ingress_name   "";
			set $service_name   "";
			set $service_port   "0";
			set $location_path  "/";
			
			rewrite_by_lua_block {
				balancer.rewrite()
			}
			access_by_lua_block {
				
			}
			header_filter_by_lua_block {
				
			}
			body_filter_by_lua_block {
				
			}
			
			log_by_lua_block {
				
				balancer.log()
				monitor.call()
			}
			
			if ($scheme = https) {
				more_set_headers                        "Strict-Transport-Security: max-age=15724800; includeSubDomains";
			}
			
			access_log off;
			
			port_in_redirect off;
			
			set $proxy_upstream_name "upstream-default-backend";
			
			client_max_body_size                    1m;
			
			proxy_set_header Host                   $best_http_host;
			
			# Pass the extracted client certificate to the backend
			
			# Allow websocket connections
			proxy_set_header                        Upgrade           $http_upgrade;
			
			proxy_set_header                        Connection        $connection_upgrade;
			
			proxy_set_header X-Request-ID           $req_id;
			proxy_set_header X-Real-IP              $the_real_ip;
			
			proxy_set_header X-Forwarded-For        $the_real_ip;
			
			proxy_set_header X-Forwarded-Host       $best_http_host;
			proxy_set_header X-Forwarded-Port       $pass_port;
			proxy_set_header X-Forwarded-Proto      $pass_access_scheme;
			
			proxy_set_header X-Original-URI         $request_uri;
			
			proxy_set_header X-Scheme               $pass_access_scheme;
			
			# Pass the original X-Forwarded-For
			proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
			
			# mitigate HTTPoxy Vulnerability
			# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
			proxy_set_header Proxy                  "";
			
			# Custom headers to proxied server
			
			proxy_connect_timeout                   5s;
			proxy_send_timeout                      60s;
			proxy_read_timeout                      60s;
			
			proxy_buffering                         off;
			proxy_buffer_size                       4k;
			proxy_buffers                           4 4k;
			proxy_request_buffering                 on;
			
			proxy_http_version                      1.1;
			
			proxy_cookie_domain                     off;
			proxy_cookie_path                       off;
			
			# In case of errors try the next upstream server before returning an error
			proxy_next_upstream                     error timeout;
			proxy_next_upstream_tries               3;
			
			proxy_pass http://upstream_balancer;
			
			proxy_redirect                          off;
			
		}
		
		# health checks in cloud providers require the use of port 80
		location /healthz {
			
			access_log off;
			return 200;
		}
		
		# this is required to avoid error if nginx is being monitored
		# with an external software (like sysdig)
		location /nginx_status {
			
			allow 127.0.0.1;
			
			allow ::1;
			
			deny all;
			
			access_log off;
			stub_status on;
		}
		
	}
	## end server _
	
	## start server springbootdemo
	server {
		server_name springbootdemo ;
		
		listen 80;
		
		listen [::]:80;
		
		set $proxy_upstream_name "-";
		
		location / {
			
			set $namespace      "default";
			set $ingress_name   "springbootdemo-ingress";
			set $service_name   "springbootdemo";
			set $service_port   "7071";
			set $location_path  "/";
			
			rewrite_by_lua_block {
				balancer.rewrite()
			}
			access_by_lua_block {
				
			}
			header_filter_by_lua_block {
				
			}
			body_filter_by_lua_block {
				
			}
			
			log_by_lua_block {
				
				balancer.log()
				monitor.call()
			}
			
			port_in_redirect off;
			
			set $proxy_upstream_name "default-springbootdemo-7071";
			
			client_max_body_size                    1m;
			
			proxy_set_header Host                   $best_http_host;
			
			# Pass the extracted client certificate to the backend
			
			# Allow websocket connections
			proxy_set_header                        Upgrade           $http_upgrade;
			
			proxy_set_header                        Connection        $connection_upgrade;
			
			proxy_set_header X-Request-ID           $req_id;
			proxy_set_header X-Real-IP              $the_real_ip;
			
			proxy_set_header X-Forwarded-For        $the_real_ip;
			
			proxy_set_header X-Forwarded-Host       $best_http_host;
			proxy_set_header X-Forwarded-Port       $pass_port;
			proxy_set_header X-Forwarded-Proto      $pass_access_scheme;
			
			proxy_set_header X-Original-URI         $request_uri;
			
			proxy_set_header X-Scheme               $pass_access_scheme;
			
			# Pass the original X-Forwarded-For
			proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
			
			# mitigate HTTPoxy Vulnerability
			# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
			proxy_set_header Proxy                  "";
			
			# Custom headers to proxied server
			
			proxy_connect_timeout                   5s;
			proxy_send_timeout                      60s;
			proxy_read_timeout                      60s;
			
			proxy_buffering                         off;
			proxy_buffer_size                       4k;
			proxy_buffers                           4 4k;
			proxy_request_buffering                 on;
			
			proxy_http_version                      1.1;
			
			proxy_cookie_domain                     off;
			proxy_cookie_path                       off;
			
			# In case of errors try the next upstream server before returning an error
			proxy_next_upstream                     error timeout;
			proxy_next_upstream_tries               3;
			
			proxy_pass http://upstream_balancer;
			
			proxy_redirect                          off;
			
		}
		
	}
	## end server springbootdemo
	
	# backend for when default-backend-service is not configured or it does not have endpoints
	server {
		listen 8181 default_server reuseport backlog=511;
		listen [::]:8181 default_server reuseport backlog=511;
		set $proxy_upstream_name "-";
		
		location / {
			return 404;
		}
	}
	
	# default server, used for NGINX healthcheck and access to nginx stats
	server {
		listen 18080 default_server reuseport backlog=511;
		listen [::]:18080 default_server reuseport backlog=511;
		set $proxy_upstream_name "-";
		
		location /healthz {
			
			access_log off;
			return 200;
		}
		
		location /is-dynamic-lb-initialized {
			
			access_log off;
			
			content_by_lua_block {
				local configuration = require("configuration")
				local backend_data = configuration.get_backends_data()
				if not backend_data then
				ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
				return
				end
				
				ngx.say("OK")
				ngx.exit(ngx.HTTP_OK)
			}
		}
		
		location /nginx_status {
			set $proxy_upstream_name "internal";
			
			access_log off;
			stub_status on;
		}
		
		location /configuration {
			access_log off;
			
			allow 127.0.0.1;
			
			allow ::1;
			
			deny all;
			
			# this should be equals to configuration_data dict
			client_max_body_size                    10m;
			client_body_buffer_size                 10m;
			proxy_buffering                         off;
			
			content_by_lua_block {
				configuration.call()
			}
		}
		
		location / {
			
			set $proxy_upstream_name "upstream-default-backend";
			proxy_set_header    Host   $best_http_host;
			
			proxy_pass          http://upstream_balancer;
		}
		
	}
}

stream {
	lua_package_cpath "/usr/local/lib/lua/?.so;/usr/lib/lua-platform-path/lua/5.1/?.so;;";
	lua_package_path "/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/?.lua;/usr/local/lib/lua/?.lua;;";
	
	lua_shared_dict tcp_udp_configuration_data 5M;
	
	init_by_lua_block {
		require("resty.core")
		collectgarbage("collect")
		
		-- init modules
		local ok, res
		
		ok, res = pcall(require, "tcp_udp_configuration")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		tcp_udp_configuration = res
	tcp_udp_configuration.nameservers = { "192.168.206.2" }
		end
		
		ok, res = pcall(require, "tcp_udp_balancer")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		tcp_udp_balancer = res
		end
	}
	
	init_worker_by_lua_block {
		tcp_udp_balancer.init_worker()
	}
	
	lua_add_variable $proxy_upstream_name;
	
	log_format log_stream [$time_local] $protocol $status $bytes_sent $bytes_received $session_time;
	
	access_log /var/log/nginx/access.log log_stream;
	
	error_log  /var/log/nginx/error.log;
	
	upstream upstream_balancer {
		server 0.0.0.1:1234; # placeholder
		
		balancer_by_lua_block {
			tcp_udp_balancer.balance()
		}
	}
	
	server {
		listen unix:/tmp/ingress-stream.sock;
		
		content_by_lua_block {
			tcp_udp_configuration.call()
		}
	}
	
	# TCP services
	
	# UDP services
	
}
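
In the rendered config above, the "## start server springbootdemo" block is the rule from springbootdemo-ingress.yaml turned into an nginx server block: requests whose Host header is springbootdemo are proxied through upstream_balancer to the service endpoints chosen by the Lua balancer. A quick client-side test, assuming 10.156.50.35 is the node running the controller:

echo "10.156.50.35 springbootdemo" >> /etc/hosts
curl http://springbootdemo/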

Support the developer

Writing something free, driven purely by interest, brings joy as well as sweat. I hope you like my work, and I hope you can support it: if you can, chip in a donation (Alipay, WeChat, and QQ group are supported); if not, a kind word is support enough. Thank you all.

Personal homepage: http://knight-black-bob.iteye.com/

Thank you for your support; I will keep doing better!

 
