Docker-Compose部署kafka教程

Docker-Compose部署kafka教程

1. 环境

2. 创建kafka.conf文件(Logstash管道配置,用于从Kafka消费日志并写入Elasticsearch)

vim kafka.conf

# Logstash pipeline: consume JSON log events from a Kafka topic and index them
# into Elasticsearch with a daily rolling index name.
input {
  kafka {
    # Kafka broker ip:port (NOT Elasticsearch — this is the bootstrap address
    # of the broker started by the docker-compose file below)
    bootstrap_servers => "10.0.168.217:9092"
    topics => ["developer"]
    group_id => "dev-logs"
    client_id => "developer"
    # Adds Kafka metadata (topic, partition, offset...) under [@metadata][kafka]
    decorate_events => true
  }
}

output {
  elasticsearch {
      # Plugin instance id; mirrors the index pattern for easier monitoring
      id => "dev-log-%{+YYYY.MM.dd}"
      # One index per day, e.g. dev-log-2022.01.31
      index => "dev-log-%{+YYYY.MM.dd}"
      # Elasticsearch username
      user => "elastic"
      # Elasticsearch password
      password => "elastic@2022"
      template_overwrite => true
      # Elasticsearch ip:port
      hosts => ["10.0.168.217:9200"]
  }
}

3. 创建kafka的docker-compose.yml文件

# Single-broker Kafka + ZooKeeper stack for the ELK log pipeline.
version: '3'
services:
  zookeeper:
    image: zookeeper
    container_name: pl_zookeeper
    ports:
      # Quoted to keep port mappings as strings (avoids YAML scalar ambiguity)
      - "2181:2181"  # client connections
      - "2888:2888"  # follower -> leader
      - "3888:3888"  # leader election
    volumes:
      - /home/data/zookeeper/data:/data
      - /home/data/zookeeper/conf:/conf
    restart: always
  kafka:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: pl_elk_kafka
    ports:
      - "9092:9092"
    environment:
      # Unique broker id; quoted so the env value stays a string, not an int
      KAFKA_BROKER_ID: "0"
      # <server internal ip>:2181/kafka — the /kafka chroot keeps Kafka's
      # znodes under one path in ZooKeeper
      KAFKA_ZOOKEEPER_CONNECT: 10.0.168.217:2181/kafka
      # PLAINTEXT://<server internal ip>:<port> — address advertised to clients
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.168.217:9092
      # Bind listener on all interfaces inside the container
      KAFKA_LISTENERS: PLAINTEXT://:9092
      KAFKA_LOG_DIRS: /data/kafka-data
      # Keep log segments for 24 hours; quoted for the same string-env reason
      KAFKA_LOG_RETENTION_HOURS: "24"
    volumes:
      - /home/data/elk/kafka/data:/data/kafka-data
      - /etc/localtime:/etc/localtime
    restart: unless-stopped

4. 启动kafka

docker-compose up -d

5. 自此kafka部署完成


版权声明:本文为sunjob_s72原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接和本声明。