Kafka startup script and configuration files
# more kafka
#!/bin/sh
# Init script for kafka: start/stop/status management of a Kafka broker
# launched via kafka-server-start.sh, with a pidfile for status tracking.
### BEGIN INIT INFO
# Provides:          kafka
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: kafka daemon
# Description:       Starts kafka as a daemon.
### END INIT INFO

PATH=$PATH:/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/kafka/bin
export PATH
export JAVA_HOME=/usr/local/java

# adirname - return absolute dirname of given file
adirname() { odir=`pwd`; cd `dirname $1`; pwd; cd "${odir}"; }

MYNAME=`basename "$0"`
MYDIR=`adirname "$0"`
name="${MYNAME}"

KAFKA_USER=elasticsearch
KAFKA_GROUP=elasticsearch
KAFKA_HOME=/usr/local/kafka
KAFKA_LOG_DIR="${MYDIR}/logs"
KAFKA_CONF_DIR="${MYDIR}/config"
KAFKA_CONF_FILENAME=server.properties
JMX_PORT=5760
# BUGFIX: the original had "-X" and "loggc:..." split across two lines,
# producing a malformed JVM flag; rejoined here as -Xloggc:.
KAFKA_HEAP_OPTS="-Xms1G -Xmx1G -XX:NewRatio=2 -XX:SurvivorRatio=8 -XX:MaxMetaspaceSize=512M -XX:CompressedClassSpaceSize=512M -Xloggc:$KAFKA_LOG_DIR/gc.log -verbose:gc -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCDateStamps -XX:+PrintGCDetails"
KAFKA_LOG_FILE="${KAFKA_LOG_DIR}/$name.log"
pidfile="${KAFKA_LOG_DIR}/$name.pid"
KAFKA_CONF_DIR_FILE="$KAFKA_CONF_DIR/$KAFKA_CONF_FILENAME"
# Default value is zero but may be overridden by /etc/default or /etc/sysconfig below.
KILL_ON_STOP_TIMEOUT=${KILL_ON_STOP_TIMEOUT-0}
KAFKA_OPTS=""
KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$KAFKA_CONF_DIR/log4j.properties"

# Site-local overrides (sourced if readable).
[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name

program=$KAFKA_HOME/bin/kafka-server-start.sh
args=" ${KAFKA_CONF_DIR_FILE}"

# quiet CMD... - run CMD silently, propagating its exit status.
quiet() {
  "$@" > /dev/null 2>&1
  return $?
}

# Launch the broker in the background, record its pid, return 0.
start() {
  KAFKA_JAVA_OPTS="${KAFKA_OPTS} -Djava.io.tmpdir=${KAFKA_HOME}"
  HOME=${KAFKA_HOME}
  export PATH HOME KAFKA_JAVA_OPTS KAFKA_HEAP_OPTS JMX_PORT
  # Ensure the log directory exists before redirecting into it.
  [ -d "${KAFKA_LOG_DIR}" ] || mkdir -p "${KAFKA_LOG_DIR}"
  # Run the program!
  $program $args > "${KAFKA_LOG_DIR}/$name.stdout" 2> "${KAFKA_LOG_DIR}/$name.err" &
  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.
  echo $! > "$pidfile"
  echo "$name started."
  return 0
}

# Graceful stop: SIGTERM, wait up to ~9s, optionally escalate to SIGKILL
# when KILL_ON_STOP_TIMEOUT=1. Returns 1 if the process is still running.
stop() {
  # Try a few times to kill TERM the program
  if status ; then
    pid=`cat "$pidfile"`
    echo "Killing $name (pid $pid) with SIGTERM"
    # BUGFIX: the original ran "ps -ef | grep $pid | ... | xargs kill -9",
    # which (a) sent SIGKILL despite announcing SIGTERM, defeating Kafka's
    # clean shutdown, and (b) could kill unrelated processes whose ps line
    # merely contained the pid as a substring. Signal the recorded pid only.
    kill -TERM $pid
    # Wait for it to exit.
    for i in 1 2 3 4 5 6 7 8 9 ; do
      echo "Waiting $name (pid $pid) to die..."
      status || break
      sleep 1
    done
    if status ; then
      if [ $KILL_ON_STOP_TIMEOUT -eq 1 ] ; then
        echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
        kill -KILL $pid
        echo "$name killed with SIGKILL."
      else
        echo "$name stop failed; still running."
        return 1 # stop timed out and not forced
      fi
    else
      echo "$name stopped."
    fi
    # Remove the stale pidfile so a later 'status' reports "not running"
    # instead of "dead but pidfile exists".
    rm -f "$pidfile"
  fi
}

# BUGFIX: the case dispatcher invoked force_stop but the original script
# never defined it. Immediate SIGKILL; may lose data.
force_stop() {
  if status ; then
    pid=`cat "$pidfile"`
    echo "Killing $name (pid $pid) with SIGKILL. This may result in data loss."
    kill -KILL $pid
    rm -f "$pidfile"
  fi
}

# Returns 0 if the pidfile's process is alive, 2 if dead-but-pidfile-exists,
# 3 if no pidfile.
status() {
  if [ -f "$pidfile" ] ; then
    pid=`cat "$pidfile"`
    if kill -0 $pid > /dev/null 2> /dev/null ; then
      # process by this pid is running.
      # It may not be our pid, but that's what you get with just pidfiles.
      # TODO(sissel): Check if this process seems to be the same as the one we
      # expect. It'd be nice to use flock here, but flock uses fork, not exec,
      # so it makes it quite awkward to use in this case.
      return 0
    else
      return 2 # program is dead but pid file exists
    fi
  else
    return 3 # program is not running
  fi
}

# Sanity-check the configuration directory; returns 1 if empty/missing.
configtest() {
  # Check if a config file exists
  if [ ! "$(ls -A ${KAFKA_CONF_DIR}/* 2> /dev/null)" ]; then
    echo "There aren't any configuration files in ${KAFKA_CONF_DIR}"
    return 1
  fi
  HOME=${KAFKA_HOME}
  export PATH HOME
  #test_args=""
  #$program ${test_args}
  #[ $? -eq 0 ] && return 0
  # Program not configured
  #return 6
}

# BUGFIX: the case dispatcher invoked reload but the original script never
# defined it. kafka-server-start.sh has no hot-reload; restart instead.
reload() {
  stop && start
}

case "$1" in
  start)
    status
    code=$?
    if [ $code -eq 0 ]; then
      echo "$name is already running"
    else
      start
      code=$?
    fi
    exit $code
    ;;
  stop) stop ;;
  force-stop) force_stop ;;
  status)
    status
    code=$?
    if [ $code -eq 0 ] ; then
      echo "$name is running"
    else
      echo "$name is not running"
    fi
    exit $code
    ;;
  reload) reload ;;
  restart)
    stop && start
    ;;
  check)
    configtest
    exit $?
    ;;
  *)
    # BUGFIX: $SCRIPTNAME was never defined; use $name and list all actions.
    echo "Usage: $name {start|stop|force-stop|status|restart|reload|check}" >&2
    exit 3
    ;;
esac
exit $?
# cat server.properties |grep -v "^#"|grep -v "^$"
broker.id=1
delete.topic.enable=true
default.replication.factor=2
listeners=PLAINTEXT://192.168.1.190:9092
advertised.listeners=PLAINTEXT://192.168.1.190:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
message.max.bytes = 10240000
log.dirs=/home/elasticsearch/kafka/logs
num.partitions=6
num.recovery.threads.per.data.dir=1
# NOTE: duplicate conflicting log.cleaner.enable=false removed; the effective
# value (last occurrence wins in Java properties) is set to true below.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
log.cleaner.enable=true
log.cleanup.policy=delete
log.cleaner.min.compaction.lag.ms=86400000
zookeeper.connect=zoo1:2181,zoo2:2181,zoo3:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
# cat producer.properties |grep -v "^#"|grep -v "^$"
bootstrap.servers=zoo1:9092,zoo2:9092,zoo3:9092
compression.type=none