

Shell auto-completion:
yum -y install bash-completion
echo "source <(kubectl completion bash)" >> ~/.bashrc
Add image registry credentials
kubectl -n $NAMESPACE create secret docker-registry $KEYNAME \
  --docker-server=$DOCKER_REGISTRY_SERVER \
  --docker-username=$DOCKER_USER \
  --docker-password=$DOCKER_PASSWORD \
  --docker-email=$DOCKER_EMAIL
echo "------------ create k8s-user secret ----------------"
kubectl get secret $KEYNAME --output="jsonpath={.data.\.dockerconfigjson}" | base64 -d
echo "------------ add k8s-user serviceaccount ----------------"
kubectl -n $NAMESPACE patch serviceaccount default -p '{"imagePullSecrets": [{"name": "k8s-user"}]}'
Get the cluster's internal service & pod CIDRs
ps -ef | grep -Po 'cluster-cidr\S+\s|service-cluster-ip-range\S+\s'
Get cluster / component status
kubectl get componentstatus
kubectl cluster-info
Check kubelet status
systemctl status kubelet
journalctl -xefu kubelet
Update the IP ranges intercepted by the Istio sidecar
helm template install/kubernetes/helm/istio \
  --set global.proxy.includeIPRanges="10.1.0.0/16\,10.2.0.0/20" \
  -x templates/sidecar-injector-configmap.yaml | kubectl apply -f -
Enable Istio automatic sidecar injection
kubectl label namespace default istio-injection=enabled
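To confirm which namespaces have injection enabled (a quick check, not part of the original notes):
kubectl get namespace -L istio-injection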
Replace an existing deployment with the Istio sidecar injected
istioctl kube-inject -f <(kubectl get deploy xxx -o yaml) | kubectl replace -f -
Restart a pod (there is no direct restart; deleting or replacing the pod has the same effect, or run /cashbus/tomcat/restart.sh inside the container)
kubectl delete po -l app=sc-tag
kubectl get pod logtail-ds-698m5 -n kube-system -o yaml | kubectl replace --force -f -
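For pods managed by a Deployment or DaemonSet, newer kubectl (1.15+) also offers a rolling restart; a sketch, assuming a deployment named sc-tag:
kubectl rollout restart deployment sc-tag
kubectl rollout status deployment sc-tag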
Forward pod traffic to the node where kubectl runs
# kiali
kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=kiali -o jsonpath='{.items[0].metadata.name}') 20001:20001
# grafana
kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=grafana -o jsonpath='{.items[0].metadata.name}') 3000:3000 &
# prometheus
kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=prometheus -o jsonpath='{.items[0].metadata.name}') 9090:9090 &
# servicegraph
kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=servicegraph -o jsonpath='{.items[0].metadata.name}') 8088:8088 &
# jaeger-collector
kubectl -n istio-system port-forward $(kubectl -n istio-system get po -l app=tracing-on-sls,component=collector -o jsonpath='{.items[0].metadata.name}') 9411:9411 &
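Most of these port-forwards run in the background (&); to stop them later, a minimal sketch:
jobs                                   # list background jobs in the current shell
kill %1                                # stop one of them by job number
pkill -f 'kubectl.*port-forward'       # or stop every kubectl port-forward at once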
Stop all pods of an application (there is no direct stop; scaling the corresponding deployment to 0 has the same effect)
kubectl scale deployment sc-contract-sc-xxx --replicas=0
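To bring the application back afterwards, scale the replica count up again (1 is just an example value):
kubectl scale deployment sc-contract-sc-xxx --replicas=1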
Create resources from inline YAML on the command line
cat <<EOF | kubectl create -f -
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: simple
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
      - path: /svc
        backend:
          serviceName: http-svc
          servicePort: 80
EOF
Mark the master as schedulable (not recommended)
kubectl taint node -l node-role.kubernetes.io/master node-role.kubernetes.io/master=:PreferNoSchedule --overwrite
kubectl taint node -l node-role.kubernetes.io/master node-role.kubernetes.io/master-
Mark a node as unschedulable
kubectl cordon node
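The reverse operation, to make the node schedulable again (node is the same placeholder name):
kubectl uncordon node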
Install helm manually
######### helm install requires IPv6 support
# /etc/sysctl.conf
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
$ sysctl -p
# /etc/sysconfig/network
NETWORKING_IPV6=yes
$ service network restart
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
Upgrade helm tiller manually (the official registry is blocked and unreachable from mainland China, so the Aliyun mirror is used here)
export TILLER_TAG=v2.9.1
kubectl --namespace=kube-system set image deployments/tiller-deploy \
  tiller=registry-vpc.cn-beijing.aliyuncs.com/google_containers/tiller:$TILLER_TAG
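To watch the image swap complete and confirm the new tiller version (a quick check, assuming the standard tiller-deploy deployment name):
kubectl --namespace=kube-system rollout status deployment/tiller-deploy
helm version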
Helm auto-completion
source <(helm completion bash)
Get a pod's state before it terminated abnormally
kubectl get pod xxxxxxxx -o go-template='{{range .status.containerStatuses}}{{"Container Name: "}}{{.name}}{{"\r\nLastState: "}}{{.lastState}}{{end}}'
Delete a node
# first drain the resources running on the node
kubectl get no -o wide | awk '/NotReady/{print $1}' | xargs -i -t kubectl drain {} --ignore-daemonsets
# then delete the node
kubectl get no -o wide | awk '/NotReady/{print $1}' | xargs -i -t kubectl delete node {}
Evict all pods from a node
kubectl drain cn-beijing.i-xxxxx --delete-local-data --ignore-daemonsets
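To verify that only DaemonSet pods are left on the node after draining (a quick check, same placeholder node name):
kubectl get po --all-namespaces -o wide --field-selector spec.nodeName=cn-beijing.i-xxxxx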
Label nodes for the ingress controller in bulk, used for automatic scheduling
kubectl get node | awk '!/master/{print $1}' | xargs -i -t kubectl label node {} node-role.kubernetes.io/ingress=true --overwrite
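To confirm the label landed on the expected nodes:
kubectl get node -L node-role.kubernetes.io/ingress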
Assorted patches
# patch a configmap
kubectl patch -n kube-system cm tcp-services --patch '{"data":{"32218": "default/sc-xxx0debug:32219"}}'
# patch a service
kubectl patch -n kube-system svc nginx-ingress-lb --patch '{"spec":{"ports":[{"name":"patch-test","port": 32222, "protocol":"TCP", "targetPort": 32222}]}}'
# patch a nodeSelector
kubectl patch deployment sc-demo-sc-demo -p '{"spec":{"template":{"spec":{"nodeSelector":{"env":"testing"}}}}}'
# patch a readiness healthCheck with JSON Patch (RFC 6902, see http://jsonpatch.com/)
kubectl patch deployment xxxx --type='json' -p='[
  {
    "op": "replace",
    "path": "/spec/template/spec/containers/0/readinessProbe",
    "value": {
      "failureThreshold": 1,
      "initialDelaySeconds": 30,
      "periodSeconds": 3,
      "successThreshold": 3,
      "httpGet": { "path": "/rest/healthCheck", "port": "app-port" },
      "timeoutSeconds": 1
    }
  }
]'
List all running images (go-template example)
kubectl get deployment -o go-template --template='{{range .items}}{{range .spec.template.spec.containers}}{{printf "%s\n" .image}}{{end}}{{end}}'
kubectl get deployment -o go-template --template='{{range .items}}{{range .spec.template.spec.containers}}{{printf "%s\n" .image}}{{end}}{{end}}' | awk -F'[:/]' '{printf " [\042%s\042]=\042%s\042\n",$(NF-1),$NF}'
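An equivalent jsonpath one-liner, for comparison with the go-template form above (a sketch):
kubectl get deployment -o jsonpath='{.items[*].spec.template.spec.containers[*].image}' | tr -s ' ' '\n'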
Delete Istio-related CRDs (customresourcedefinitions)
kubectl get crd | awk '/istio/{print $1}' | xargs -i kubectl delete crd {}
Empty an entire namespace (use with caution)
kubectl -n istio-system delete all --all
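Before wiping a namespace it is worth previewing what will be removed:
kubectl -n istio-system get all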
Force-update a helm release stuck in the "no deployed release" state; it still feels like a half-measure, and so far I have not found a good way to handle "no deployed release"
helm upgrade --force -i -f ack-istio-default.yaml ack-istio-default incubator/ack-istio
