
Mastering k8s: Pinpoint distributed tracing

南门鸿振
2023-12-01

Deploying Pinpoint on k8s: ZooKeeper

1. Create the working directory and a dedicated namespace

mkdir -p /data/yaml/pinpoint && cd /data/yaml/pinpoint
kubectl create ns pinpoint

2. Deploy the ZooKeeper ensemble

mkdir -p /data/yaml/pinpoint/zookeepers && cd /data/yaml/pinpoint/zookeepers

vim zoo1.yaml
apiVersion: v1
kind: Service
metadata:
    namespace: pinpoint
    labels:
      app: zoo1
    name: zoo1
spec:
   ports:
   - name: httpa
     targetPort: 2181
     port: 2181
   - name: httpb
     targetPort: 2888
     port: 2888
   - name: httpc
     targetPort: 3888
     port: 3888
   selector:
     app: zoo1
---
kind: Deployment
apiVersion: apps/v1
metadata:
  namespace: pinpoint
  name: zoo1
  labels:
     app: zoo1
spec:
  replicas: 1
  minReadySeconds: 120
  strategy:
    type: RollingUpdate
    rollingUpdate:
       maxSurge: 1
       maxUnavailable: 0
  selector:
    matchLabels:
      app: zoo1
  template:
    metadata:
       labels:
          app: zoo1
    spec:
       terminationGracePeriodSeconds: 60
       hostname: zoo1
       containers:
       - name: zoo1
         image: harbor.junengcloud.com/pinpoint/zookeeper:3.4
         imagePullPolicy: IfNotPresent
         resources:
           requests:
             cpu: 100m
             memory: 204Mi
           limits:
             cpu: 2000m
             memory: 2048Mi
         ports:
         - containerPort: 2181
           name: httpa
         - containerPort: 2888
           name: httpb
         - containerPort: 3888
           name: httpc
         livenessProbe:
           tcpSocket:
              port: 2181
           initialDelaySeconds: 60
           periodSeconds: 180
         env:
         - name: ZOO_MY_ID
           value: "1"
         - name: ZOO_SERVERS
           value: "server.1=0.0.0.0:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888"                              
[root@master zookeepers]# vim zoo2.yaml 
apiVersion: v1
kind: Service
metadata:
    namespace: pinpoint
    labels:
      app: zoo2
    name: zoo2
spec:
   ports:
   - name: httpa
     targetPort: 2181
     port: 2181
   - name: httpb
     targetPort: 2888
     port: 2888
   - name: httpc
     targetPort: 3888
     port: 3888
   selector:
     app: zoo2
---
kind: Deployment
apiVersion: apps/v1
metadata:
  namespace: pinpoint
  name: zoo2
  labels:
     app: zoo2
spec:
  replicas: 1
  minReadySeconds: 120
  strategy:
    type: RollingUpdate
    rollingUpdate:
       maxSurge: 1
       maxUnavailable: 0
  selector:
    matchLabels:
      app: zoo2
  template:
    metadata:
       labels:
          app: zoo2
    spec:
       terminationGracePeriodSeconds: 60
       hostname: zoo2
       containers:
       - name: zoo2
         image: harbor.junengcloud.com/pinpoint/zookeeper:3.4
         imagePullPolicy: IfNotPresent
         resources:
           requests:
             cpu: 100m
             memory: 204Mi
           limits:
             cpu: 2000m
             memory: 2048Mi
         ports:
         - containerPort: 2181
           name: httpa
         - containerPort: 2888
           name: httpb
         - containerPort: 3888
           name: httpc
         livenessProbe:
           tcpSocket:
              port: 2181
           initialDelaySeconds: 60
           periodSeconds: 180
         env:
         - name: ZOO_MY_ID
           value: "2"
         - name: ZOO_SERVERS
           value: "server.1=zoo1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zoo3:2888:3888"
vim zoo3.yaml
apiVersion: v1
kind: Service
metadata:
    namespace: pinpoint
    labels:
      app: zoo3
    name: zoo3
spec:
   ports:
   - name: httpa
     targetPort: 2181
     port: 2181
   - name: httpb
     targetPort: 2888
     port: 2888
   - name: httpc
     targetPort: 3888
     port: 3888
   selector:
     app: zoo3
---
kind: Deployment
apiVersion: apps/v1
metadata:
  namespace: pinpoint
  name: zoo3
  labels:
     app: zoo3
spec:
  replicas: 1
  minReadySeconds: 120
  strategy:
    type: RollingUpdate
    rollingUpdate:
       maxSurge: 1
       maxUnavailable: 0
  selector:
    matchLabels:
      app: zoo3
  template:
    metadata:
       labels:
         app: zoo3
    spec:
       terminationGracePeriodSeconds: 60
       hostname: zoo3
       containers:
       - name: zoo3
         image: harbor.junengcloud.com/pinpoint/zookeeper:3.4
         imagePullPolicy: IfNotPresent
         resources:
           requests:
             cpu: 100m
             memory: 204Mi
           limits:
             cpu: 2000m
             memory: 2048Mi
         ports:
         - containerPort: 2181
           name: httpa
         - containerPort: 2888
           name: httpb
         - containerPort: 3888
           name: httpc
         livenessProbe:
           tcpSocket:
              port: 2181
           initialDelaySeconds: 60
           periodSeconds: 180
         env:
         - name: ZOO_MY_ID
           value: "3"
         - name: ZOO_SERVERS
           value: "server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=0.0.0.0:2888:3888"
kubectl apply -f  zoo1.yaml
kubectl apply -f  zoo2.yaml
kubectl apply -f  zoo3.yaml
[root@master zookeepers]# kubectl get pods -n pinpoint
NAME                    READY   STATUS    RESTARTS   AGE
zoo1-749d8cc498-bbx4g   1/1     Running   0          3h33m
zoo2-695c9f8755-swnqb   1/1     Running   0          3h28m
zoo3-6bb94d5568-45tft   1/1     Running   0          3h28m
[root@master zookeepers]# kubectl get svc -n pinpoint
NAME   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
zoo1   ClusterIP   10.102.68.14     <none>        2181/TCP,2888/TCP,3888/TCP   178m
zoo2   ClusterIP   10.108.226.102   <none>        2181/TCP,2888/TCP,3888/TCP   3h29m
zoo3   ClusterIP   10.101.11.240    <none>        2181/TCP,2888/TCP,3888/TCP   3h29m
[root@master zookeepers]# 
[root@master zookeepers]# kubectl -n pinpoint exec -it zoo1-749d8cc498-bbx4g   -- zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: follower
[root@master zookeepers]# 

[root@master zookeepers]# kubectl -n pinpoint exec -it zoo2-695c9f8755-swnqb    -- zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: follower

[root@master zookeepers]# kubectl -n pinpoint exec -it zoo3-6bb94d5568-45tft   -- zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: leader
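As an extra check that the members can reach each other through the Service DNS names, zkCli.sh (shipped in the zookeeper image) can be pointed from one pod at another; a minimal sketch, reusing the pod name from the output above:

kubectl -n pinpoint exec -it zoo1-749d8cc498-bbx4g -- zkCli.sh -server zoo2:2181 ls /
# a normal listing (e.g. [zookeeper]) means the ensemble members can talk to each other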

Deploying the Flink jobmanager and taskmanager on k8s

Configure the jobmanager

[root@master zookeepers]# mkdir -p /data/yaml/pinpoint/jobmanager
[root@master zookeepers]# cd /data/yaml/pinpoint/jobmanager/
[root@master jobmanager]# vim deployment.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
    namespace: pinpoint
    name: jobmanager
    labels:
       app: jobmanager
spec:
   replicas: 1
   minReadySeconds: 60
   strategy:
     type: RollingUpdate
     rollingUpdate:
       maxSurge: 1
       maxUnavailable: 0
   selector:
      matchLabels:
        app: jobmanager
   template:
     metadata:
        labels:
           app: jobmanager
     spec:
        imagePullSecrets:
        - name: harbor
        terminationGracePeriodSeconds: 60
        hostname: jobmanager
        containers:
        - name: jobmanager
          image: harbor.junengcloud.com/pinpoint/flink:1.3.1
          args:
          - jobmanager
          resources:
            requests:
                cpu: 100m
                memory: 204Mi
            limits:
                cpu: 2000m
                memory: 2048Mi
          env:
          - name: JOB_MANAGER_RPC_ADDRESS
            value: jobmanager
          ports:
          - containerPort: 8081
            name: httpa
          - containerPort: 6123
            name: httpb
          livenessProbe:
            tcpSocket:
               port: 8081
            initialDelaySeconds: 60
            periodSeconds: 180
[root@master jobmanager]# vim svc.yaml 
kind: Service
apiVersion: v1
metadata:
      namespace: pinpoint
      name: jobmanager
      labels:
         app: jobmanager
spec:
   ports:
   - name: httpa
     port: 8081
     targetPort: 8081
   - name: httpb
     port: 6123
     targetPort: 6123
   selector:
     app: jobmanager
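Apply the two manifests and wait for the pod to become Ready; the Flink dashboard on port 8081 can then be reached through a temporary port-forward (a sketch, using the files above):

kubectl apply -f deployment.yaml
kubectl apply -f svc.yaml
kubectl -n pinpoint get pods -l app=jobmanager
kubectl -n pinpoint port-forward svc/jobmanager 8081:8081   # Ctrl-C to stop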

Configure the taskmanager

[root@master jobmanager]# mkdir -p /data/yaml/pinpoint/taskmanager
[root@master jobmanager]# cd /data/yaml/pinpoint/taskmanager/
[root@master taskmanager]# vim deployment.yaml 

apiVersion: apps/v1
kind: Deployment
metadata:
      namespace: pinpoint
      name: taskmanager
      labels:
         app: taskmanager
spec:
     replicas: 1
     minReadySeconds: 120
     strategy:
        type: RollingUpdate
        rollingUpdate:
           maxSurge: 1
           maxUnavailable: 0
     selector:
         matchLabels:
             app: taskmanager
     template:
         metadata:
           labels:
              app: taskmanager
         spec:
           imagePullSecrets:
           - name: harbor
           terminationGracePeriodSeconds: 60
           hostname: taskmanager
           containers:
           - name: taskmanager
             image: harbor.junengcloud.com/pinpoint/flink:1.3.1
             args:
             - taskmanager
             resources:
               requests:
                  cpu: 100m
                  memory: 204Mi
               limits:
                  cpu: 2000m
                  memory: 2048Mi
             env:
             - name: JOB_MANAGER_RPC_ADDRESS
               value: jobmanager
             ports:
             - containerPort: 6121
               name: httpa
             - containerPort: 6122
                name: httpb
             - containerPort: 19994
               name: httpc
[root@master taskmanager]# vim svc.yaml 

kind: Service
apiVersion: v1
metadata:
    namespace: pinpoint
    labels:
        app: taskmanager
    name: taskmanager
spec:
   ports:
   - name: httpa
     port: 6121
     targetPort: 6121
   - name: httpb
     port: 6122
     targetPort: 6122
   - name: httpc
     port: 19994
     targetPort: 19994
   selector:
     app: taskmanager
[root@master taskmanager]# kubectl apply -f deployment.yaml 
deployment.apps/taskmanager created
[root@master taskmanager]# kubectl apply -f svc.yaml 
service/taskmanager created
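Whether the taskmanager has registered with the jobmanager can be checked through Flink's monitoring REST API; a sketch using a temporary port-forward:

kubectl -n pinpoint port-forward svc/jobmanager 8081:8081 &
curl -s http://127.0.0.1:8081/taskmanagers   # a non-empty "taskmanagers" list means the worker is attached
kill %1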

Configure HBase

[root@master taskmanager]# mkdir -p /data/yaml/pinpoint/pinpoint-hbase
[root@master taskmanager]# cd /data/yaml/pinpoint/pinpoint-hbase/
On the master:
yum install nfs-utils -y
mkdir /data/volumes -pv
vim /etc/exports
/data/volumes 192.168.10.0/24(rw,no_root_squash)

exportfs -arv
service nfs start
systemctl enable nfs
systemctl status nfs

vim nfs.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-nfs-volume
spec:
  containers:
  - name: test-nfs
    image: nginx:latest
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
      protocol: TCP
    volumeMounts:
    - name: nfs-volumes
      mountPath: /usr/share/nginx/html
  volumes:
  - name: nfs-volumes
    nfs:
       path: /data/volumes
       server: 192.168.10.140
kubectl apply -f nfs.yaml
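To confirm the pod really sees the NFS export, write a test page into /data/volumes on the master and fetch it through the pod's nginx (a quick sketch):

echo "nfs ok" > /data/volumes/index.html
kubectl get pod test-nfs-volume -o wide
curl http://$(kubectl get pod test-nfs-volume -o jsonpath='{.status.podIP}')/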

On node1 and node2:
docker load -i nfs-subdir-external-provisioner.tar.gz

On the master:
vim serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
  
  
kubectl create clusterrolebinding nfs-provisioner --clusterrole=cluster-admin --serviceaccount=default:nfs-provisioner
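The ServiceAccount manifest itself also needs to be applied (the binding command above assumes it lives in the default namespace):

kubectl apply -f serviceaccount.yaml
kubectl get sa nfs-provisioner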


Install the nfs-provisioner
mkdir /data/nfs_pro -p
mkdir /data/volume_test/v1

vim /etc/exports
/data/volumes 192.168.10.0/24(rw,no_root_squash)
/data/nfs_pro 192.168.10.0/24(rw,no_root_squash)
/data/volume_test/v1 192.168.10.0/24(rw,no_root_squash)             

exportfs -arv
systemctl restart nfs


vim nfs-deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
   name: nfs-provisioner
spec:
   selector:
      matchLabels:
            app: nfs-provisioner
   replicas: 1
   strategy:
      type: Recreate
   template:
      metadata:
         labels:
            app: nfs-provisioner
      spec:
         serviceAccount: nfs-provisioner
         containers:
         - name: nfs-provisioner
           image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0
           imagePullPolicy: IfNotPresent
           volumeMounts:
           - name: nfs-client-root
             mountPath: /persistentvolumes
           env:
           - name: PROVISIONER_NAME
             value: example.com/nfs
           - name: NFS_SERVER
             value: 192.168.10.140
           - name: NFS_PATH
             value: /data/nfs_pro
         volumes:
         - name: nfs-client-root
           nfs:
             server: 192.168.10.140
             path: /data/nfs_pro
             
kubectl apply -f nfs-deployment.yaml
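Check that the provisioner pod is Running and was able to mount the export (a quick sanity check):

kubectl get pods -l app=nfs-provisioner
kubectl logs deploy/nfs-provisioner --tail=20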

Create a StorageClass to provision PVs
vim nfs-storageclass.yaml

kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
   name: local-storage
provisioner: example.com/nfs
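Apply it and confirm the class is registered:

kubectl apply -f nfs-storageclass.yaml
kubectl get storageclass local-storage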


Create a PVC and let the StorageClass dynamically provision the PV
vim  claim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
    name: test-claim1
spec:
   accessModes: ["ReadWriteMany"]
   resources:
      requests:
          storage: 1Gi
   storageClassName: local-storage
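After applying the claim it should go to Bound within a few seconds, with a PV generated automatically under /data/nfs_pro:

kubectl apply -f claim.yaml
kubectl get pvc test-claim1
kubectl get pv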

On node1 and node2:
yum install nfs-utils -y
service nfs start
systemctl enable nfs

On node1:
mkdir /data1
mount -t nfs 192.168.10.140:/data/volumes /data1

On node2:
mkdir /data2
mount -t nfs 192.168.10.140:/data/volumes /data2
[root@master pinpoint-hbase]# vim pvc.yaml 

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
   namespace: pinpoint
   name: pinpoint-zookeeper
spec:
   accessModes:
   - ReadWriteOnce
   resources:
     requests:
        storage: 10Gi
   storageClassName:  local-storage
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
    namespace: pinpoint
    name: pinpoint-hbase
spec:
    accessModes:
    - ReadWriteOnce
    resources:
       requests:
          storage: 10Gi
    storageClassName: local-storage
[root@master pinpoint-hbase]# vim pv.yaml 

kind: PersistentVolume
apiVersion: v1
metadata:
  namespace: pinpoint
  name: pinpoint-hbase
spec:
  capacity:
     storage: 10Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /data/yaml/pinpoint/pinpoint-hbase
  nodeAffinity:
    required:
       nodeSelectorTerms:
       - matchExpressions:
         - key: kubernetes.io/hostname
           operator: In
           values:
           - worker1   # replace with the target node's hostname
---
kind: PersistentVolume
apiVersion: v1
metadata:
   namespace: pinpoint
   name: pinpoint-zookeeper
spec:
   capacity:
      storage: 10Gi
   volumeMode: Filesystem
   accessModes:
   - ReadWriteOnce
   storageClassName: local-storage
   # NOTE: as with the pinpoint-hbase PV above, a volume source (e.g. a local path
   # on worker1) and matching nodeAffinity still need to be added before this PV can bind.
[root@master pinpoint-hbase]# vim sts.yaml 

kind: StatefulSet
apiVersion: apps/v1
metadata:
   name: pinpoint-hbase
   namespace: pinpoint
spec:
   selector:
     matchLabels:
          app: pinpoint-hbase
   serviceName: pinpoint-hbase
   replicas: 1
   updateStrategy:
      type: RollingUpdate
   template:
     metadata:
       labels:
          app: pinpoint-hbase
     spec:
       nodeName: worker1
       terminationGracePeriodSeconds: 60
       containers:
       - name: pinpoint-hbase
         imagePullPolicy: IfNotPresent
         image: docker.io/pinpointdocker/pinpoint-hbase:latest
         resources:
            requests:
               cpu: 100m
               memory: 2048Mi
            limits:
               cpu: 2000m
               memory: 7Gi
         ports:
         - containerPort: 60000
           protocol: TCP
           name: httpa
         - containerPort: 16010
           protocol: TCP
           name: httpb
         - containerPort: 60020
           protocol: TCP
           name: httpc
         - containerPort: 16030
           protocol: TCP
           name: httpd
         livenessProbe:
           tcpSocket:
             port: 60000
           initialDelaySeconds: 180
           periodSeconds: 120
         volumeMounts:
         - mountPath: /etc/localtime
           readOnly: false
           name: time-data
         - mountPath: /home/pinpoint/hbase
           name: hbase
         - mountPath: /home/pinpoint/zookeeper
           name: zookeeper
       volumes:
       - name: time-data
         hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
       - name: hbase
         persistentVolumeClaim:
            claimName: pinpoint-hbase
       - name: zookeeper
         persistentVolumeClaim:
             claimName: pinpoint-zookeeper
[root@master pinpoint-hbase]# vim svc.yaml 

kind: Service
apiVersion: v1
metadata:
  namespace: pinpoint
  labels:
     app: pinpoint-hbase
  name: pinpoint-hbase
spec:
  ports:
  - name: httpa
    port: 60000
    targetPort: 60000
  - name: httpb
    port: 16010
    targetPort: 16010
  - name: httpc
    port: 60020
    targetPort: 60020
  - name: httpd
    port: 16030
    targetPort: 16030
  selector:
    app: pinpoint-hbase
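Assuming the four manifests above, apply them in order and wait for the pod to become Ready; the pinpoint-hbase container initializes the Pinpoint HBase tables on first start, which can take several minutes:

kubectl apply -f pv.yaml
kubectl apply -f pvc.yaml
kubectl apply -f sts.yaml
kubectl apply -f svc.yaml
kubectl -n pinpoint get pvc
kubectl -n pinpoint get pods -l app=pinpoint-hbase -w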

Configure pinpoint-collector

[root@master ~]# mkdir -p /data/yaml/pinpoint/pinpoint-collector
[root@master ~]# cd /data/yaml/pinpoint/pinpoint-collector/
[root@master pinpoint-collector]# vim deployment.yaml

kind: Deployment
apiVersion: apps/v1
metadata:
   namespace: pinpoint
   name: pinpoint-collector
   labels:
     app: pinpoint-collector
spec:
   replicas: 1
   minReadySeconds: 120
   strategy:
       type: RollingUpdate
       rollingUpdate:
          maxSurge: 1
          maxUnavailable: 0
   selector:
     matchLabels:
       app: pinpoint-collector
   template:
     metadata:
       labels:
          app: pinpoint-collector
     spec:
       imagePullSecrets:
       - name: harbor
       terminationGracePeriodSeconds: 60
       hostname: pinpoint-collector
       containers:
       - name:  pinpoint-collector
         image: harbor.junengcloud.com/pinpoint/pinpoint-collector:2.1.0
         resources:
           requests:
             cpu: 200m
             memory: 307Mi
           limits:
             cpu: 3000m
             memory: 4Gi
         ports:
         - containerPort: 9994
           name: httpa
         - containerPort: 9992
           name: httpb
         - containerPort: 9993
           name: httpc
         - containerPort: 9995
           name: httpd
         - containerPort: 9996
           name: httpe
         - containerPort: 9997
           name: httpf
         - containerPort: 9998
           protocol: UDP
           name: httpg
         - containerPort: 9999
           protocol: UDP
           name: httph
         livenessProbe:
           tcpSocket:
               port: 9991
           initialDelaySeconds: 60
           periodSeconds: 180
         env:
         - name: CLUSTER_ENABLE
           value: "true"
         - name: FLINK_CLUSTER_ENABLE
           value: "true"
         - name: FLINK_CLUSTER_ZOOKEEPER_ADDRESS
           value: "zoo1"
         - name: PINPOINT_ZOOKEEPER_ADDRESS
           value: "zoo1"
         - name: SPRING_PROFILES_ACTIVE
           value: "release"
          - name: HBASE_CLIENT_HOST
           value: "zoo1"
         - name: HBASE_HOST
           value: "zoo1"
         - name: HBASE_PORT
           value: "2181"
         - name: COLLECTOR_RECEIVER_GRPC_SPAN_WORKER_EXECUTOR_THREAD_SIZE
           value: "256"
vim svc.yaml

apiVersion: v1
kind: Service
metadata:
  namespace: pinpoint
  labels:
    app: pinpoint-collector
  name: pinpoint-collector
spec:
  ports:
  - name: httpa
    port: 9991
    targetPort: 9991
    nodePort: 30091
  - name: httpb
    port: 9992
    targetPort: 9992
    nodePort: 30092
  - name: httpc
    port: 9993
    targetPort: 9993
    nodePort: 30093
  - name: httpd
    port: 9994
    targetPort: 9994
    nodePort: 30094
  - name: httpe
    port: 9995
    targetPort: 9995
    nodePort: 30095
  - name: httpf
    port: 9996
    targetPort: 9996
    nodePort: 30096
  - name: httpg
    port: 9995
    protocol: UDP
    targetPort: 9995
  - name: httph
    port: 9996
    protocol: UDP
    targetPort: 9996
  selector:
    app: pinpoint-collector
  type: NodePort
  
  
kubectl apply -f deployment.yaml
kubectl apply -f svc.yaml
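Once the collector pod passes its liveness probe (TCP 9991), a quick look at the pod and its logs helps confirm it started cleanly:

kubectl -n pinpoint get pods -l app=pinpoint-collector
kubectl -n pinpoint logs deploy/pinpoint-collector --tail=20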

Configure pinpoint-web

mkdir -p /data/yaml/pinpoint/pinpoint-web
cd /data/yaml/pinpoint/pinpoint-web
docker pull pinpointdocker/pinpoint-web
vim deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: pinpoint
  name: pinpoint-web
  labels:
    app: pinpoint-web
spec:
  replicas: 1
  minReadySeconds: 120
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  selector:
    matchLabels:
      app: pinpoint-web
  template:
    metadata:
      labels:
        app: pinpoint-web
    spec:
      terminationGracePeriodSeconds: 60
      hostname: pinpoint-web
      containers:
      - name: pinpoint-web
        image: docker.io/pinpointdocker/pinpoint-web:latest
        resources:
          requests:
            cpu: 100m
            memory: 204Mi
          limits:
            cpu: 2000m
            memory: 2048Mi
        env:
        - name: ADMIN_PASSWORD
          value: "admin"
        - name: ALARM_MAIL_DEBUG
          value: "false"
        - name: ALARM_MAIL_SENDER_ADDRESS
          value: "762359676@qq.com"
        - name: ALARM_MAIL_SERVER_PASSWORD
          value: "mjh123"
        - name: ALARM_MAIL_SERVER_PORT
          value: "465"
        - name: ALARM_MAIL_SERVER_URL
          value: "192.168.10.140"
        - name: ALARM_MAIL_SERVER_USERNAME
          value: "system"
        - name: ALARM_MAIL_SMTP_AUTH
          value: "false"
        - name: ALARM_MAIL_SMTP_PORT
          value: "25"
        - name: ALARM_MAIL_SMTP_STARTTLS_ENABLE
          value: "false"
        - name: ALARM_MAIL_SMTP_STARTTLS_REQUIRED
          value: "false"
        - name: ALARM_MAIL_TRANSPORT_PROTOCOL
          value: "smtp"
        - name: BATCH_ENABLE
          value: "false"
        - name: BATCH_FLINK_SERVER
          value: "jobmanager"
        - name: BATCH_SERVER_IP
          value: "127.0.0.1"
        - name: CLUSTER_ENABLE
          value: "true"
        - name: CONFIG_SENDUSAGE
          value: "true"
        - name: CONFIG_SHOW_APPLICATIONSTAT
          value: "true"
        - name: JDBC_DRIVERCLASSNAME
          value: "com.mysql.jdbc.Driver"
        - name: JDBC_PASSWORD
          value: "klvchen"
        - name: JDBC_URL
          value: "jdbc:mysql://192.168.10.140:3307/pinpoint?characterEncoding=UTF-8"
        - name: JDBC_USERNAME
          value: "root"
        - name: LOGGING_LEVEL_ROOT
          value: "INFO"
        - name: PINPOINT_ZOOKEEPER_ADDRESS
          value: "zoo1"
        - name: SERVER_PORT
          value: "8079"
        - name: SPRING_PROFILES_ACTIVE
          value: "release,batch"
        ports:
        - containerPort: 9997
          name: http
        - containerPort: 8079
          name: web
        livenessProbe:
          tcpSocket:
            port: 8079
          initialDelaySeconds: 60
          periodSeconds: 180
vim svc.yaml

apiVersion: v1
kind: Service
metadata:
  namespace: pinpoint
  labels:
    app: pinpoint-web
  name: pinpoint-web
spec:
  type: NodePort
  ports:
  - name: http
    port: 9997
    targetPort: 9997
    nodePort: 30097
  - name: web
    port: 8079
    targetPort: 8079
    nodePort: 30079
  selector:
    app: pinpoint-web


kubectl apply -f deployment.yaml 
kubectl apply -f svc.yaml
Then open http://192.168.10.140:30079 in a browser to reach the Pinpoint web UI.
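A quick reachability check from the shell (192.168.10.140 is one of the node IPs used throughout this setup):

curl -s -o /dev/null -w "%{http_code}\n" http://192.168.10.140:30079/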

(Screenshot: Pinpoint web UI)

Build a test image and run a test

Build the test image

On the server that needs the agent:
1. Download the agent
mkdir -p /data/demo
cd /data/demo
wget https://github.com/pinpoint-apm/pinpoint/releases/download/v2.1.0/pinpoint-agent-2.1.0.tar.gz
tar zxvf pinpoint-agent-2.1.0.tar.gz
cd /data/demo/pinpoint-agent/profiles/release
cp pinpoint.config  pinpoint.config.ori
2. vim pinpoint.config
Point profiler.transport.grpc.collector.ip at the Pinpoint collector address:

profiler.transport.grpc.collector.ip=192.168.10.140
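A quick grep confirms the change and shows any other collector address keys in the same file that may also need to point at the collector:

grep -n collector.ip pinpoint.config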
3. cd /data/demo
Put the application jar you want to trace here. This jar starts a service whose /hello endpoint returns "hello spring".
vim Dockerfile

FROM openjdk:8u302-slim
RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai  /etc/localtime
# demo-0.0.1-SNAPSHOT.jar is the application jar to trace
ADD demo-0.0.1-SNAPSHOT.jar demo.jar
ADD pinpoint-agent-2.1.0 /pinpoint-agent/
COPY docker-entrypoint.sh /
CMD [ "/bin/bash", "/docker-entrypoint.sh" ]
# agentId supports at most 24 characters and must be unique; with multiple K8s replicas, applicationName must be identical

vim docker-entrypoint.sh

#!/bin/bash
java -javaagent:/pinpoint-agent/pinpoint-bootstrap-2.1.0.jar -Dpinpoint.agentId=${HOSTNAME} -Dpinpoint.applicationName=${appName} ${JVM:--Xmx512m -Xms512m} -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp/ -Djava.security.egd=file:/dev/./urandom -Duser.timezone=GMT+08 -jar /${appName}.jar

Push the image to the registry

docker build -t harbor.junengcloud.com/tmp/pinpoint-demo:0.0.11 .
docker push harbor.junengcloud.com/tmp/pinpoint-demo:0.0.11
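Before deploying to the cluster, the image can be smoke-tested locally (a sketch; it assumes the demo jar listens on 8080, which is what the Service below targets, and the agent may log collector connection warnings when run outside the cluster):

docker run -d --name demo-test -e appName=demo -p 18080:8080 harbor.junengcloud.com/tmp/pinpoint-demo:0.0.11
curl http://127.0.0.1:18080/hello   # expect "hello spring"
docker rm -f demo-test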

Test: create the deployment on the master

mkdir -p /data/yaml/default/pinpoint-demo
cd /data/yaml/default/pinpoint-demo
vim deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: demo
  template:
    metadata:
      labels:
        app: demo
    spec:
      containers:
      - name: demo
        image: harbor.junengcloud.com/tmp/pinpoint-demo:0.0.11   # the image pushed to the registry above
        imagePullPolicy: IfNotPresent
        env:
        - name: JVM
          value: "-Xms1024m -Xmx1024m"
        - name: appName
          valueFrom:
            fieldRef:
              fieldPath: metadata.labels['app'] 

kubectl apply -f deployment.yaml
vim svc.yaml

apiVersion: v1
kind: Service
metadata:
  labels:
    app: pinpoint-demo
  name: pinpoint-demo
spec:
  ports:
  - name: http
    port: 8080
    targetPort: 8080
    nodePort: 30080   # 30079 is already used by the pinpoint-web Service
  selector:
    app: pinpoint-demo
  type: NodePort

kubectl apply -f  svc.yaml
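With both pods Running, call the /hello endpoint through the NodePort a few times; shortly afterwards the demo application should show up on the Pinpoint web UI's service map (a sketch; 192.168.10.140 is any node IP in this setup):

PORT=$(kubectl get svc pinpoint-demo -o jsonpath='{.spec.ports[0].nodePort}')
curl http://192.168.10.140:$PORT/hello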
