`

kafka安装配置

    博客分类:
  • mq
 
阅读更多
# Start the Kafka 0.9.0.0 broker in the background, using the server.properties
# shown below. NOTE(review): a plain `&` dies with the shell session; for anything
# persistent, prefer `-daemon` or nohup — confirm against the Kafka quickstart.
cd /test/kafka_2.11-0.9.0.0/bin
./kafka-server-start.sh ./../config/server.properties &


[work@sz-vm-143-111 config]$ cat server.properties 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Kafka broker configuration (server.properties, Kafka 0.9.0.0).
# See kafka.server.KafkaConfig for additional details and defaults.

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0

############################# Socket Server Settings #############################

# The address(es) the socket server listens on, in the form PROTOCOL://host:port.
listeners=PLAINTEXT://192.168.143.111:9092

# The port the socket server listens on (legacy setting; superseded by listeners)
#port=9092

# Hostname the broker will bind to. If not set, the server will bind to all interfaces.
# NOTE(review): with `listeners` set above, host.name is redundant — it is the
# legacy pre-0.9 way to pick the bind address. Confirm against the Kafka 0.9
# broker configuration docs before removing.
#host.name=localhost
host.name=192.168.143.111

# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured.  Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
#advertised.host.name=<hostname routable by clients>

# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=<port accessible by clients>

# The number of threads handling network requests
num.network.threads=3

# The number of threads doing disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files.
# NOTE(review): /tmp is typically purged on reboot — use a persistent
# directory for anything beyond a throwaway test installation.
log.dirs=/tmp/kafka-logs

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion (168 h = 7 days)
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
# NOTE(review): the broker binds to 192.168.143.111 but connects to ZooKeeper on
# localhost — fine on a single host; use an address reachable by all brokers in a cluster.
zookeeper.connect=localhost:2181

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
分享到:
评论

相关推荐

    kafka安装配置教程完整版

    **Kafka安装配置教程完整版** Kafka是一种分布式流处理平台,由Apache软件基金会开发,广泛应用于大数据实时处理、消息传递以及日志收集系统。它以其高吞吐量、低延迟和可扩展性而闻名。在本文中,我们将详细介绍...

    kafka安装与配置

    **二、Kafka配置** Kafka的配置主要通过修改`config/server.properties`文件完成。以下是一些关键配置项: 1. **broker.id**: 每个Kafka节点都有一个唯一的ID,用于标识自己。 2. **listeners**: 定义Kafka服务器...

    kafka配置安装详解

    ### Kafka配置安装详解 #### 一、环境搭建与配置 Kafka是一款开源的消息队列中间件,被广泛应用于大数据处理领域。本篇文章将详细介绍如何在本地环境中安装并配置Kafka,以及进行基本的操作演示。 ##### 环境要求...

    2、Kafka安装配置、快速启动及基本操作教程

    Kafka安装配置、快速启动及基本操作1、Kafka下载2、启动服务扩展知识:进程守护方法和前后台进程切换3、创建一个主题扩展知识:bootstrap-server和zookeeper使用区别4、删除主题5、发送消息6、消费消息7、设置多个...

    Kafka安装(安装与配置).pdf

    对于其他机器上的Kafka配置文件,同样需要修改`broker.id`为不同的值,例如1、2、3等。 ##### 5. 配置环境变量 为了方便使用Kafka命令,需要在每台机器上配置环境变量: ``` [root@c0~]# vim /etc/bashrc export ...

    kafka参数配置详解

    Kafka参数配置详解 Kafka是一种流行的分布式流处理平台,用于构建实时数据管道和事件驱动的架构。为了确保Kafka集群的稳定运行和高性能,需要正确地配置Kafka参数。本文将详细介绍Kafka的参数配置,包括系统参数、...

    CDH平台kafka配置文件以及相关操作

    CDH大数据平台kafka配置文件以及相关操作

    kafka集群配置文件

    在这里,我们将深入探讨`kafka集群配置文件`以及它们与`zookeeper集群`的关系。 **1. Kafka集群配置** Kafka集群由多个服务器(称为Brokers)组成,每个Broker都存储一部分主题的数据。为了设置一个Kafka集群,我们...

    kafka完整安装配置.zip

    在这个“kafka完整安装配置.zip”压缩包中,包含了在Ubuntu 14上安装Kafka所需的所有文档和软件包,包括Zookeeper和Kafka本身。 **一、Kafka简介** Kafka是一个高吞吐量、低延迟的消息队列系统,其核心设计目标是...

    kafka配置调优实践

    Kafka 配置调优实践 Kafka 配置调优实践是指通过调整 Kafka 集群的参数配置来提高其吞吐性能。下面是 Kafka 配置调优实践的知识点总结: 一、存储优化 * 数据目录优先存储到 XFS 文件系统或者 EXT4,避免使用 EXT...

    Kafka 配置用户名密码例子

    在本文中,我们将深入探讨如何在Apache Kafka中配置SASL/PLAIN认证机制,并通过具体的密码验证实现安全的通信。Kafka是一个分布式流处理平台,它在数据传输中扮演着重要角色,而安全性是其核心考量之一。SASL...

    kafka Windows安装

    在 Windows 上安装 Kafka 包括下载、配置 Zookeeper 和 Kafka 服务,以及创建和操作 topics、producer 和 consumer。了解这些基础知识后,你可以进一步探索 Kafka 的高级特性,如消费组、offset 管理、Kafka Connect...

    Kafka安装与部署指南

    总结来说,Kafka的安装和部署涉及到Java环境的准备、下载与解压Kafka、配置服务器属性、启动ZooKeeper和Kafka服务,以及创建和测试消息传递。了解这些基本步骤和概念对于理解和操作Kafka至关重要。在实际应用中,你...

    Spring 集成 Kafka的配置文件及代码讲解

    接下来,我们来配置Spring Boot的`application.yml`或`application.properties`文件以连接到Kafka集群。这里是一个示例配置: ```yaml spring: kafka: bootstrap-servers: localhost:9092 # Kafka服务器地址 ...

    kafka 配置文件

    这里我们关注的焦点是 Kafka 的配置文件,包括 `server.properties`、`consumer.properties` 和 `producer.properties`,它们对于理解和优化 Kafka 集群的运行至关重要。 首先,我们来看 `server.properties` 文件...

    Kafka安装手册(Linux)

    本文档详细介绍了如何在Linux环境下安装和配置Kafka服务,包括下载Kafka压缩包、解压、配置Kafka和Zookeeper的相关参数、启动服务以及验证服务是否正常工作的全过程。按照这些步骤操作,可以帮助用户顺利完成Kafka...

    Kafka集群安装配置.docx

    Kafka 集群安装配置 Kafka 集群安装配置是指在多台服务器上部署 Kafka 集群,以便实现高可用性和高性能的消息队列系统。下面是 Kafka 集群安装配置的详细步骤和知识点: 一、环境准备 * 系统版本:CentOS Linux ...

    Apache Kafka:Kafka安装与配置.docx

    Apache Kafka:Kafka安装与配置.docx

Global site tag (gtag.js) - Google Analytics