
k8s-7: Persistent single-node and cluster deployments of Kafka + ZooKeeper

salarh

I needed this in a k8s environment a while back. I read a lot of documentation and all of it had pitfalls, so after working through them myself I put this summary together; feel free to use it if you have the same need.

I. Single-node deployment

Recommended for development environments only; the mounting here is dynamic, so it is not recommended for production.

1. Install ZooKeeper (zk)

cat > zk.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  labels:
    app: zookeeper-service
  name: zookeeper-service
spec:
  ports:
  - name: zookeeper-port
    port: 2181
    targetPort: 2181
  selector:
    app: zookeeper
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: zookeeper
  name: zookeeper
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      containers:
      - image: wurstmeister/zookeeper
        imagePullPolicy: IfNotPresent
        name: zookeeper
        ports:
        - containerPort: 2181
EOF
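
With the manifest written out, a minimal way to apply and sanity-check it (a sketch, assuming kubectl already points at the target cluster and namespace) is:

kubectl apply -f zk.yaml
kubectl get pods -l app=zookeeper    # wait for the pod to reach Running
kubectl get svc zookeeper-service    # ClusterIP exposing port 2181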

2. Install Kafka

cat > kafka.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: kafka-service
  labels:
    app: kafka
spec:
  type: NodePort
  ports:
  - port: 9092
    name: kafka-port
    targetPort: 9092
    nodePort: 30092
    protocol: TCP
  selector:
    app: kafka

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka
  labels:
    app: kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka
  template:
    metadata:
      labels:
        app: kafka
    spec:
      containers:
      - name: kafka
        image: wurstmeister/kafka
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9092
        env:
        - name: KAFKA_ADVERTISED_PORT
          value: "9092"
        - name: KAFKA_ADVERTISED_HOST_NAME
          value: "kafka-service"   # [the kafka Service's ClusterIP]
        - name: KAFKA_ZOOKEEPER_CONNECT
          value: zookeeper-service:2181
        - name: KAFKA_BROKER_ID
          value: "1"
EOF
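
Applying and checking it follows the same pattern (a sketch; 30092 is the nodePort declared in the Service above):

kubectl apply -f kafka.yaml
kubectl get pods -l app=kafka    # the broker pod should become Running
kubectl get svc kafka-service    # NodePort 30092 -> container port 9092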

3. Verification

Exec into the container and run a console producer and consumer to test, as sketched below.
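
One way to open a shell in the broker pod (a sketch; the exact pod name is generated by the Deployment):

kubectl exec -it deploy/kafka -- bash    # or: kubectl exec -it <kafka-pod-name> -- bash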

Producer:

kafka-console-producer.sh --broker-list 127.0.0.1:9092 --topic demo


Consumer:

kafka-console-consumer.sh --bootstrap-server 127.0.0.1:9092  --topic demo --from-beginning


II. Cluster deployment

This is the setup used in production. Storage is mounted directly via PVs and PVCs.

1. ZooKeeper pv-pvc.yaml

kind: PersistentVolume
apiVersion: v1
metadata:
  name: k8s-pv-zk0
  annotations:
    volume.beta.kubernetes.io/storage-class: "wms-zook"
  labels:
    type: local
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 192.168.XX.XX
    path: "/mnt/nas/zmj_pord/nfs/wms-zookeeper/pv0"
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: k8s-pv-zk1
  annotations:
    volume.beta.kubernetes.io/storage-class: "wms-zook"
  labels:
    type: local
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 192.168.XX.XX
    path: "/mnt/nas/zmj_pord/nfs/wms-zookeeper/pv1"
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: k8s-pv-zk2
  annotations:
    volume.beta.kubernetes.io/storage-class: "wms-zook"
  labels:
    type: local
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 192.168.XX.XX
    path: "/mnt/nas/zmj_pord/nfs/wms-zookeeper/pv2"
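
The volumes can be created first and verified before anything claims them (a sketch; the file name is whatever you saved the manifest as):

kubectl apply -f zk-pv-pvc.yaml
kubectl get pv | grep k8s-pv-zk    # all three PVs should show STATUS Available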

2. ZooKeeper all.yaml

---
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  type: NodePort
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zk
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: Always
        image: "mirrorgooglecontainers/kubernetes-zookeeper:1.0-3.4.10"
        resources:
          requests:
            memory: "1Gi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
      annotations:
        volume.beta.kubernetes.io/storage-class: "wms-zook"
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 5Gi
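
After applying the manifest, the ensemble can be checked roughly like this (a sketch; zkCli.sh ships in the kubernetes-zookeeper image):

kubectl apply -f zk-all.yaml
kubectl get pods -l app=zk                          # expect zk-0, zk-1, zk-2 Running and Ready
kubectl exec zk-0 -- zkCli.sh create /hello world   # write through one member...
kubectl exec zk-1 -- zkCli.sh get /hello            # ...and read it back from another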

3. Kafka pv-pvc.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-kafka0
  namespace: tools
  labels:
    app: kafka
  annotations:
    volume.beta.kubernetes.io/storage-class: "wms-kafka"
spec:
  capacity:
    storage: 5G
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 192.168.XX.XX
    path: "/mnt/nas/zmj_pord/nfs/wms-kafk/pv0"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-kafka1
  namespace: tools
  labels:
    app: kafka
  annotations:
    volume.beta.kubernetes.io/storage-class: "wms-kafka"
spec:
  capacity:
    storage: 5G
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 192.168.XX.XX
    path: "/mnt/nas/zmj_pord/nfs/wms-kafka/pv1"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-kafka2
  namespace: tools
  labels:
    app: kafka
  annotations:
    volume.beta.kubernetes.io/storage-class: "wms-kafka"
spec:
  capacity:
    storage: 5G
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 192.168.xx.XX
    path: "/mnt/nas/zmj_pord/nfs/wms-kafka/pv2"
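
Same idea as the ZooKeeper volumes (a sketch; file name assumed):

kubectl apply -f kafka-pv-pvc.yaml
kubectl get pv | grep k8s-pv-kafka    # three Available volumes with the wms-kafka storage class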

4. Kafka all.yaml

(Note: the health check here needs to be commented out with "#".)

---
apiVersion: v1
kind: Service
metadata:
  name: kafka-hs
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    name: server
  clusterIP: None
  selector:
    app: kafka
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-cs
  labels:
    app: kafka
spec:
  selector:
    app: kafka
  type: NodePort
  ports:
  - name: client
    port: 9092
    # nodePort: 19092
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kafka-pdb
spec:
  selector:
    matchLabels:
      app: kafka
  minAvailable: 2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
spec:
  serviceName: kafka-hs
  replicas: 3
  selector:
    matchLabels:
      app: kafka
  template:
    metadata:
      labels:
        app: kafka
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - kafka
            topologyKey: "kubernetes.io/hostname"
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: "app"
                  operator: In
                  values:
                  - zk
              topologyKey: "kubernetes.io/hostname"
      terminationGracePeriodSeconds: 300
      containers:
      - name: kafka
        # imagePullPolicy: IfNotPresent
        image: registry.cn-hangzhou.aliyuncs.com/jaxzhai/k8skafka:v1
        #image: wurstmeister/kafka
        # Debug only: keeps the container alive without starting Kafka.
        # Leave it commented out, otherwise it conflicts with the command below.
        # command: [ "/bin/bash", "-ce", "tail -f /dev/null" ]
        resources:
          requests:
            memory: "200M"
            cpu: 500m
        ports:
        - containerPort: 9092
          name: server
        command:
        - sh
        - -c
        - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
          --override listeners=PLAINTEXT://:9092 \
          --override zookeeper.connect=zk-0.zk-hs.prod-zmj-wms.svc.cluster.local:2181,zk-1.zk-hs.prod-zmj-wms.svc.cluster.local:2181,zk-2.zk-hs.prod-zmj-wms.svc.cluster.local:2181 \
          --override log.dir=/var/lib/kafka \
          --override auto.create.topics.enable=true \
          --override auto.leader.rebalance.enable=true \
          --override background.threads=10 \
          --override compression.type=producer \
          --override delete.topic.enable=true \
          --override leader.imbalance.check.interval.seconds=300 \
          --override leader.imbalance.per.broker.percentage=10 \
          --override log.flush.interval.messages=9223372036854775807 \
          --override log.flush.offset.checkpoint.interval.ms=60000 \
          --override log.flush.scheduler.interval.ms=9223372036854775807 \
          --override log.retention.bytes=-1 \
          --override log.retention.hours=168 \
          --override log.roll.hours=168 \
          --override log.roll.jitter.hours=0 \
          --override log.segment.bytes=1073741824 \
          --override log.segment.delete.delay.ms=60000 \
          --override message.max.bytes=1000012 \
          --override min.insync.replicas=1 \
          --override num.io.threads=8 \
          --override num.network.threads=3 \
          --override num.recovery.threads.per.data.dir=1 \
          --override num.replica.fetchers=1 \
          --override offset.metadata.max.bytes=4096 \
          --override offsets.commit.required.acks=-1 \
          --override offsets.commit.timeout.ms=5000 \
          --override offsets.load.buffer.size=5242880 \
          --override offsets.retention.check.interval.ms=600000 \
          --override offsets.retention.minutes=1440 \
          --override offsets.topic.compression.codec=0 \
          --override offsets.topic.num.partitions=50 \
          --override offsets.topic.replication.factor=3 \
          --override offsets.topic.segment.bytes=104857600 \
          --override queued.max.requests=500 \
          --override quota.consumer.default=9223372036854775807 \
          --override quota.producer.default=9223372036854775807 \
          --override replica.fetch.min.bytes=1 \
          --override replica.fetch.wait.max.ms=500 \
          --override replica.high.watermark.checkpoint.interval.ms=5000 \
          --override replica.lag.time.max.ms=10000 \
          --override replica.socket.receive.buffer.bytes=65536 \
          --override replica.socket.timeout.ms=30000 \
          --override request.timeout.ms=30000 \
          --override socket.receive.buffer.bytes=102400 \
          --override socket.request.max.bytes=104857600 \
          --override socket.send.buffer.bytes=102400 \
          --override unclean.leader.election.enable=true \
          --override zookeeper.session.timeout.ms=6000 \
          --override zookeeper.set.acl=false \
          --override broker.id.generation.enable=true \
          --override connections.max.idle.ms=600000 \
          --override controlled.shutdown.enable=true \
          --override controlled.shutdown.max.retries=3 \
          --override controlled.shutdown.retry.backoff.ms=5000 \
          --override controller.socket.timeout.ms=30000 \
          --override default.replication.factor=1 \
          --override fetch.purgatory.purge.interval.requests=1000 \
          --override group.max.session.timeout.ms=300000 \
          --override group.min.session.timeout.ms=6000 \
          --override inter.broker.protocol.version=0.10.2-IV0 \
          --override log.cleaner.backoff.ms=15000 \
          --override log.cleaner.dedupe.buffer.size=134217728 \
          --override log.cleaner.delete.retention.ms=86400000 \
          --override log.cleaner.enable=true \
          --override log.cleaner.io.buffer.load.factor=0.9 \
          --override log.cleaner.io.buffer.size=524288 \
          --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
          --override log.cleaner.min.cleanable.ratio=0.5 \
          --override log.cleaner.min.compaction.lag.ms=0 \
          --override log.cleaner.threads=1 \
          --override log.cleanup.policy=delete \
          --override log.index.interval.bytes=4096 \
          --override log.index.size.max.bytes=10485760 \
          --override log.message.timestamp.difference.max.ms=9223372036854775807 \
          --override log.message.timestamp.type=CreateTime \
          --override log.preallocate=false \
          --override log.retention.check.interval.ms=300000 \
          --override max.connections.per.ip=2147483647 \
          --override num.partitions=1 \
          --override producer.purgatory.purge.interval.requests=1000 \
          --override replica.fetch.backoff.ms=1000 \
          --override replica.fetch.max.bytes=1048576 \
          --override replica.fetch.response.max.bytes=10485760 \
          --override reserved.broker.max.id=1000 "
        env:
        - name: KAFKA_HEAP_OPTS
          value: "-Xmx300M -Xms200M"
        - name: KAFKA_OPTS
          value: "-Dlogging.level=INFO"
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/kafka
        readinessProbe:
          tcpSocket:
            port: 9092
          timeoutSeconds: 5
          initialDelaySeconds: 20
          # exec:
          #   command:
          #   - sh
          #   - -c
          #   - "/opt/kafka/bin/kafka-broker-api-versions.sh --bootstrap-server=0.0.0.0:9092"
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
      annotations:
        volume.beta.kubernetes.io/storage-class: "wms-kafka"
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 5G
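
Applying and checking the broker StatefulSet might look like this (a sketch; the names come from the manifest above):

kubectl apply -f kafka-all.yaml
kubectl get pods -l app=kafka          # expect kafka-0, kafka-1, kafka-2 Running and Ready
kubectl get pvc | grep datadir-kafka   # datadir-kafka-0/1/2 should be Bound to the NFS PVs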

5. Testing

Omitted here; follow the same steps as in the single-node verification (a sketch follows below).
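
For example, producing and consuming through the headless service from inside one of the brokers (a sketch; depending on the image, the scripts may need the /opt/kafka/bin/ prefix):

kubectl exec -it kafka-0 -- bash
kafka-console-producer.sh --broker-list kafka-0.kafka-hs:9092 --topic demo
kafka-console-consumer.sh --bootstrap-server kafka-0.kafka-hs:9092 --topic demo --from-beginning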
