Hadoop Cluster Setup --> 2. ZooKeeper Setup


ZooKeeper installation

Copy the Hadoop batch-operation scripts into a zookeeper_base_op directory and remove s2 and s3 from the ips host list, since ZooKeeper will only run on nn1, nn2, and s1 (a sketch of what these helper scripts are assumed to look like follows).
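The helper scripts themselves are not shown in this post; they come from the earlier Hadoop setup article. As a rough sketch of what they are assumed to look like, ips is a plain host list and the *_all.sh scripts simply loop over it:

#~/zookeeper_base_op/ips  (with s2 and s3 removed, only the three ZooKeeper nodes remain)
nn1.hadoop
nn2.hadoop
s1.hadoop

#!/bin/bash
#~/zookeeper_base_op/ssh_all.sh (sketch): run the given command on every host in ips
for host in $(cat ~/zookeeper_base_op/ips); do
    ssh hadoop@"$host" "$@"
done

#!/bin/bash
#~/zookeeper_base_op/scp_all.sh (sketch): copy a local file into the given directory on every host
for host in $(cat ~/zookeeper_base_op/ips); do
    scp "$1" hadoop@"$host":"$2"
done

ssh_root.sh is assumed to be the same as ssh_all.sh, only connecting as root instead of hadoop.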
#distribute the zookeeper tarball
[hadoop@nn1 zookeeper_base_op]$ sh scp_all.sh /tmp/upload/zookeeper-3.4.8.tar.gz  /tmp/
#extract
[hadoop@nn1 zookeeper_base_op]$ sh ssh_root.sh tar -zxf /tmp/zookeeper-3.4.8.tar.gz -C /usr/local/
#set ownership
[hadoop@nn1 zookeeper_base_op]$ sh ssh_root.sh chown -R hadoop:hadoop /usr/local/zookeeper-3.4.8/
#create the symlink
[hadoop@nn1 zookeeper_base_op]$ sh ssh_root.sh ln -s /usr/local/zookeeper-3.4.8/ /usr/local/zookeeper 
#fix ownership of the symlink itself
[hadoop@nn1 zookeeper_base_op]$ sh ssh_root.sh chown -h hadoop:hadoop /usr/local/zookeeper

[hadoop@nn1 zookeeper_base_op]$ sh ssh_root.sh chmod 770 /usr/local/zookeeper-3.4.8/
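Not part of the original steps, but a quick look at the package, symlink, and ownership on every node can save trouble later (relying on the same assumed ssh_all.sh helper):

[hadoop@nn1 zookeeper_base_op]$ sh ssh_all.sh ls -ld /usr/local/zookeeper /usr/local/zookeeper-3.4.8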

Configuration files

[hadoop@nn1 zookeeper_base_op]$ cd /usr/local/zookeeper/conf/

[hadoop@nn1 conf]$ sh ~/zookeeper_base_op/ssh_all.sh rm -rf /usr/local/zookeeper/conf/zoo_sample.cfg

[hadoop@nn1 conf]$ sh ~/zookeeper_base_op/scp_all.sh /tmp/upload/zoo.cfg /usr/local/zookeeper/conf/

ZooKeeper configuration file (zoo.cfg) contents

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/data/
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=nn1.hadoop:2888:3888    
server.2=nn2.hadoop:2888:3888    
server.3=s1.hadoop:2888:3888
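A note on the server.N lines (standard ZooKeeper behavior, not spelled out in the post): 2888 is the port followers use to talk to the leader, 3888 is the leader-election port, and N must match the number written into the myid file under dataDir on that host, which is exactly what the myid step below sets up:

server.1=nn1.hadoop  ->  /data/myid on nn1 contains 1
server.2=nn2.hadoop  ->  /data/myid on nn2 contains 2
server.3=s1.hadoop   ->  /data/myid on s1 contains 3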
#create the /data directory configured as dataDir in zoo.cfg (also used as the log output folder)
[hadoop@nn1 hadoop_base_op]$ sh ssh_root.sh mkdir /data
[hadoop@nn1 hadoop_base_op]$ sh ssh_root.sh chown -R hadoop:hadoop /data

#edit zkEnv.sh to change the log output directory
[hadoop@nn1 hadoop_base_op]$ vim /usr/local/zookeeper/bin/zkEnv.sh   
ZOO_LOG_DIR=/data    #log output directory

The relevant part of /usr/local/zookeeper/bin/zkEnv.sh after the edit (ZOO_LOG_DIR added near the top):
---------------------------------------------------------------------------------------
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script should be sourced into other zookeeper
# scripts to setup the env variables

# We use ZOOCFGDIR if defined,
# otherwise we use /etc/zookeeper
# or the conf directory that is
# a sibling of this script's directory

ZOOBINDIR="${ZOOBINDIR:-/usr/bin}"
ZOOKEEPER_PREFIX="${ZOOBINDIR}/.."
ZOO_LOG_DIR=/data

if [ "x$ZOOCFGDIR" = "x" ]
then
  if [ -e "${ZOOKEEPER_PREFIX}/conf" ]; then
    ZOOCFGDIR="$ZOOBINDIR/../conf"
  else
    ZOOCFGDIR="$ZOOBINDIR/../etc/zookeeper"
  fi
fi

if [ -f "${ZOOCFGDIR}/zookeeper-env.sh" ]; then
  . "${ZOOCFGDIR}/zookeeper-env.sh"
fi

if [ "x$ZOOCFG" = "x" ]
then
---------------------------------------------------------------------------------------
(rest of zkEnv.sh unchanged)

#distribute the modified zkEnv.sh
[hadoop@nn1 zookeeper_base_op]$ sh scp_all.sh /usr/local/zookeeper/bin/zkEnv.sh /usr/local/zookeeper/bin/
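Optionally confirm the edited script reached every node (assuming ssh_all.sh passes its arguments straight through to ssh):

[hadoop@nn1 zookeeper_base_op]$ sh ssh_all.sh grep ZOO_LOG_DIR /usr/local/zookeeper/bin/zkEnv.sh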
#create the myid file on each ZooKeeper node
[hadoop@nn1 data]$ touch myid && echo 1 > myid && cat myid
1
[hadoop@nn1 data]$ ssh nn2.hadoop
[hadoop@nn2 ~]$ cd /data
[hadoop@nn2 data]$ touch myid && echo 2 > myid && cat myid 
2
[hadoop@nn2 data]$ ssh s1.hadoop
[hadoop@s1 ~]$ cd /data
[hadoop@s1 data]$ touch myid && echo 3 > myid && cat myid
3
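After exiting the nested ssh sessions back to nn1, the ids can be double-checked in one shot (again relying on the assumed ssh_all.sh helper); each node should print a different value, 1 through 3:

[hadoop@nn1 zookeeper_base_op]$ sh ssh_all.sh cat /data/myid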

Configure environment variables

#switch to root, since only root can edit the system environment variables
[hadoop@nn1 ~]$ su root
[root@nn1 hadoop]# vim /etc/profile

#set Hadoop Path
export HADOOP_HOME=/usr/local/hadoop
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HDFS_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export LD_LIBRARY_PATH=$HADOOP_HOME/lib/native:/usr/lib64

export HBASE_HOME=/usr/local/hbase
export HIVE_HOME=/usr/local/hive
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HBASE_HOME/bin:$HIVE_HOME/bin:/usr/local/zookeeper/bin
#copy the updated /etc/profile to every node
[hadoop@nn1 etc]$ cp ./profile /tmp
[hadoop@nn1 etc]$ cd ~/hadoop_base_op/
[hadoop@nn1 hadoop_base_op]$ sh scp_all.sh /tmp/profile /tmp
[hadoop@nn1 hadoop_base_op]$ sh ssh_root.sh cp /tmp/profile /etc/
[hadoop@nn1 hadoop_base_op]$ sh ssh_all.sh source /etc/profile
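Note that sourcing /etc/profile through ssh only affects that one remote shell; any new login shell picks up the new PATH on its own. A simple way to spot-check one node:

[hadoop@nn1 hadoop_base_op]$ ssh s1.hadoop
[hadoop@s1 ~]$ which zkServer.sh
/usr/local/zookeeper/bin/zkServer.sh
[hadoop@s1 ~]$ exit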

Start ZooKeeper

[hadoop@nn1 zookeeper_base_op]$ sh ssh_all.sh /usr/local/zookeeper/bin/zkServer.sh start 
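Each node should print roughly the following (typical ZooKeeper 3.4.x output; treat it as the expected shape rather than exact text):

ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED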

Check the ZooKeeper status (one leader and two followers expected)

[hadoop@nn1 zookeeper]$ sh ssh_all.sh /usr/local/zookeeper/bin/zkServer.sh status
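Two of the three nodes should report follower and one should report leader, along the lines of:

ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: follower        (one node will show Mode: leader instead)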

Restart

[hadoop@nn1 zookeeper]$ sh ssh_all.sh /usr/local/zookeeper/bin/zkServer.sh restart
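The post stops here, but a quick smoke test with the bundled zkCli.sh client (connecting to clientPort 2181 configured above) confirms the ensemble is actually serving requests; a fresh cluster contains only the /zookeeper node:

[hadoop@nn1 zookeeper]$ /usr/local/zookeeper/bin/zkCli.sh -server nn1.hadoop:2181
[zk: nn1.hadoop:2181(CONNECTED) 0] ls /
[zookeeper]
[zk: nn1.hadoop:2181(CONNECTED) 1] quit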