The starting point is a NetworkPolicy in the foo namespace that admits ingress only from a single client address (10.2.3.4/32) and from namespaces whose region label is not bar:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  namespace: foo
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - ipBlock:
        cidr: 10.2.3.4/32
    - namespaceSelector:
        matchExpressions:
        - key: region
          operator: NotIn
          values:
          - bar
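A minimal sketch of applying and inspecting the policy (the file name policy.yaml is an assumption):

# Apply the policy and confirm which pods and peers it selects.
kubectl apply -f policy.yaml
kubectl -n foo describe networkpolicy test-network-policy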
The cluster in question has three nodes; as the tunl0 interfaces further down show, the CNI is Calico running in IPIP mode:
kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
node1 Ready master,worker 34d v1.19.8 10.102.123.117 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.6
node2 Ready worker 34d v1.19.8 10.102.123.104 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.6
node3 Ready worker 34d v1.19.8 10.102.123.143 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.6
The Tekton dashboard pod is scheduled on node2:
kubectl -n tekton-pipelines get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
tekton-dashboard-75c65d785b-xbgk6 1/1 Running 0 14h 10.233.96.32 node2 <none> <none>
It is exposed through a NodePort service, mapping container port 9097 to NodePort 31602:
kubectl -n tekton-pipelines get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
tekton-dashboard NodePort 10.233.5.155 <none> 9097:31602/TCP 10m
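The symptom can be reproduced by probing the NodePort from the whitelisted client; hypothetical commands, assuming the client really sits at 10.2.3.4. Requests the policy drops simply hang until the timeout:

curl -m 5 http://10.102.123.117:31602   # via node1
curl -m 5 http://10.102.123.104:31602   # via node2
curl -m 5 http://10.102.123.143:31602   # via node3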
A routing table taken on node1 (note it has no tunnel route for its own 10.233.90.0/24 pod subnet) shows that traffic for the other nodes' pod subnets leaves through the Calico IPIP device tunl0:
route
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
10.233.92.0 node3.cluster.l 255.255.255.0 UG 0 0 0 tunl0
10.233.96.0 node2.cluster.l 255.255.255.0 UG 0 0 0 tunl0
kube-proxy runs in IPVS mode: NodePort 31602 is bound on every local address and round-robins (rr) to the single pod backend:
ipvsadm -L
TCP node2:31602 rr
-> 10.233.96.32:9097 Masq 1 0 0
TCP node2:31602 rr
-> 10.233.96.32:9097 Masq 1 0 0
TCP node2.cluster.local:31602 rr
-> 10.233.96.32:9097 Masq 1 0 1
TCP node2:31602 rr
-> 10.233.96.32:9097 Masq 1 0 0
TCP localhost:31602 rr
-> 10.233.96.32:9097 Masq 1 0 0
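What the pod actually sees as the source address can be read from the connection-tracking table while a request is in flight; a sketch, assuming conntrack-tools is installed on the node:

# On node1: in the reply direction, the client IP has been replaced
# by the local tunl0 address (10.233.90.0) after the masquerade.
conntrack -L -p tcp --dport 31602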
On node2, which hosts the pod, the routing table also carries a direct host route to the pod IP through its cali* veth interface:
route
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
10.233.90.0 node1.cluster.l 255.255.255.0 UG 0 0 0 tunl0
10.233.92.0 node3.cluster.l 255.255.255.0 UG 0 0 0 tunl0
10.233.96.32 0.0.0.0 255.255.255.255 UH 0 0 0 cali73daeaf4b12
Each node's tunl0 interface carries the first address of that node's pod CIDR. When a NodePort request lands on a node other than the one hosting the pod, kube-proxy masquerades it before forwarding it through the tunnel, so the pod sees the forwarding node's tunl0 address as the source instead of the real client. On node1:
ifconfig
tunl0: flags=193<UP,RUNNING,NOARP> mtu 1440
inet 10.233.90.0 netmask 255.255.255.255
On node2:
ifconfig
tunl0: flags=193<UP,RUNNING,NOARP> mtu 1440
inet 10.233.96.0 netmask 255.255.255.255
On node3:
ifconfig
tunl0: flags=193<UP,RUNNING,NOARP> mtu 1440
inet 10.233.92.0 netmask 255.255.255.255
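Rather than running ifconfig on every node, the same tunnel addresses can be read from the Calico node annotations; a sketch, assuming the standard projectcalico.org/IPv4IPIPTunnelAddr annotation is present:

# Print each node name with its Calico IPIP tunnel address.
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.annotations.projectcalico\.org/IPv4IPIPTunnelAddr}{"\n"}{end}'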
Because the policy filters purely on source address, one fix is to whitelist each node's tunnel address alongside the original client:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  namespace: foo
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - ipBlock:
        cidr: 10.2.3.4/32
    - ipBlock:
        cidr: 10.233.90.0/32
    - ipBlock:
        cidr: 10.233.96.0/32
    - ipBlock:
        cidr: 10.233.92.0/32
    - namespaceSelector:
        matchExpressions:
        - key: region
          operator: NotIn
          values:
          - bar
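After re-applying, the NodePort should be reachable through any node again; a hypothetical re-test from the same client:

kubectl apply -f policy.yaml
curl -m 5 http://10.102.123.117:31602   # via node1: should now succeed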
That whitelist, however, admits anything arriving through the tunnels, not just the intended client. A cleaner alternative is externalTrafficPolicy: Local, which makes only the node hosting the pod accept NodePort traffic and preserves the client source address:
kubectl -n tekton-pipelines get svc tekton-dashboard -o yaml
apiVersion: v1
kind: Service
metadata:
  name: tekton-dashboard
  namespace: tekton-pipelines
spec:
  clusterIP: 10.233.5.155
  externalTrafficPolicy: Local
...
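If the service was created with the default Cluster policy, the field can be flipped with a patch:

kubectl -n tekton-pipelines patch svc tekton-dashboard \
  -p '{"spec":{"externalTrafficPolicy":"Local"}}'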
A deny-all policy (an empty ingress list, matching every pod in the namespace) is useful as a baseline to confirm that filtering is actually in effect:
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: test-network-policy-deny-all
  namespace: foo
spec:
  podSelector:
    matchLabels: {}
  ingress: []
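Note the example policies live in namespace foo; to guard the dashboard they would be created in tekton-pipelines instead. Assuming that, a throwaway busybox pod can confirm the deny-all (hypothetical probe, pod IP taken from the output above):

# Expect a timeout while the deny-all policy is applied.
kubectl -n default run np-test --rm -it --image=busybox --restart=Never \
  -- wget -T 5 -qO- http://10.233.96.32:9097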
And with externalTrafficPolicy: Local preserving the client address, the original single-address whitelist works again:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  namespace: foo
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - ipBlock:
        cidr: 10.2.3.4/32
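A final check from the whitelisted client; a hypothetical command. With Local, NodePorts on node1/node3 no longer forward, so the request goes through node2, which hosts the pod:

curl -m 5 http://10.102.123.104:31602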