
Initialize project

Branch: master
louzin committed 2 years ago · commit 2e12a4f06d
Changed files:
  1. README.md (77)
  2. centos_standalone_hbase.sh (58)
  3. conf/hbase-env.sh (139)
  4. conf/hbase-site.xml (48)
  5. conf/regionservers (1)
  6. hadoop/core-site.xml (25)
  7. hadoop/hadoop-env.sh (108)
  8. hadoop/hdfs-site.xml (40)
  9. hadoop/slaves (1)
  10. okhadoop.code-workspace (9)
  11. zoo.cfg (39)

README.md (77)

@@ -1,3 +1,76 @@
# okhadoop
# Test environment
CentOS 7 (currently the only supported OS)
# Description
One key to install Apache Hadoop
This shell script deploys Hadoop and its components in standalone mode with a single command; everything is installed under `/opt`.
Included:
Hadoop V2.9.0
JAVA V1.8.202
Zookeeper V3.8.0
Hbase V2.2.4
# Prerequisites
1. Internet access (needed for the wget downloads)
2. A static IP (optional)
3. A proper hostname (rather than localhost)
4. Passwordless SSH from the machine to itself (otherwise Hadoop prompts for a password)
```shell
ssh-keygen -t rsa
# press Enter at every prompt to accept the defaults
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
```
# Known issues
1. After the script finishes you must run `source /root/.bashrc` again
2. Starting Hadoop requires confirming two `unknown host` prompts
3. yarn is not configured
# One key to install Apache Hadoop
This project deploys Hadoop and its components in standalone mode via a single shell script.
![baseversion](https://img.shields.io/badge/okhadoop-0.0.1-green.svg) ![Java Version](https://img.shields.io/badge/JAVA-1.8.202-green.svg) ![hbase](https://img.shields.io/badge/HBase-2.2.4-green.svg)
![hadoop](https://img.shields.io/badge/Hadoop-2.9.0-red.svg) ![zookeeper](https://img.shields.io/badge/Zookeeper-3.8.0-blue.svg)
# Table of Contents
- [Prerequisites](#prerequisites)
- [Usage](#usage)
  - [RHEL/CentOS](#rhelcentos)
- [Known issues](#known-issues)
# Prerequisites
- Internet access (**required**)
- A static IP
- A hostname other than localhost (**required**)
```shell
vim /etc/hostname
```
- Passwordless SSH from the machine to itself (**required**; a quick check follows this list)
```shell
ssh-keygen -t rsa
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
```
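To verify the key-based login, an SSH to the local hostname should now succeed without a password (a minimal check; the very first connection still asks to confirm the host key):
```shell
# should complete with no password prompt
ssh root@$(cat /etc/hostname) exit && echo "passwordless SSH OK"
```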
# Usage
## RHEL/CentOS
```shell
chmod +x ./centos_standalone_hbase.sh
bash ./centos_standalone_hbase.sh
```
# Known issues
- After the script finishes, `source /root/.bashrc` must be run manually
- Starting Hadoop with `start-dfs.sh` requires confirming two `unknown host` prompts
- `yarn` is not configured
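Putting the issues above together, a minimal post-install start sequence looks like this (a sketch; the scripts come from the Hadoop, ZooKeeper, and HBase distributions the installer unpacks into `/opt`):
```shell
source /root/.bashrc   # reload the environment variables the installer appended
start-dfs.sh           # start HDFS; answer "yes" to the two host-key prompts
zkServer.sh start      # start the standalone ZooKeeper
start-hbase.sh         # start HBase against the external ZooKeeper
jps                    # expect NameNode, DataNode, QuorumPeerMain, HMaster, HRegionServer
```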

centos_standalone_hbase.sh (58)

@@ -0,0 +1,58 @@
#!/bin/bash
echo "开始更新"
yum update -y && yum install wget -y
#下载
#JDK
wget https://repo.huaweicloud.com/java/jdk/8u202-b08/jdk-8u202-linux-x64.tar.gz
tar xf jdk-8u202-linux-x64.tar.gz -C /opt/
#Hadoop
wget https://repo.huaweicloud.com/apache/hadoop/common/hadoop-2.9.0/hadoop-2.9.0.tar.gz
tar xf hadoop-2.9.0.tar.gz -C /opt/
#Zookeeper
wget https://repo.huaweicloud.com/apache/zookeeper/zookeeper-3.8.0/apache-zookeeper-3.8.0-bin.tar.gz
tar xf apache-zookeeper-3.8.0-bin.tar.gz -C /opt/
#Hbase
wget https://repo.huaweicloud.com/apache/hbase/2.2.4/hbase-2.2.4-bin.tar.gz
tar xf hbase-2.2.4-bin.tar.gz -C /opt/
#环境变量
echo "加载环境变量"
echo -e "# JDK Env\nexport JAVA_HOME=/opt/jdk1.8.0_202\n\
export PATH=\${JAVA_HOME}/bin:/\$PATH" >> /root/.bashrc
echo -e "# Hadoop Env\nexport HADOOP_HOME=/opt/hadoop-2.9.0/\n\
export PATH=\$HADOOP_HOME/sbin:\$HADOOP_HOME/bin:\$PATH" >> /root/.bashrc
echo -e "# Zookeeper Env\nexport ZOOKEEPER_HOME=/opt/apache-zookeeper-3.8.0-bin\n\
export PATH=\$PATH:\$ZOOKEEPER_HOME/bin" >> /root/.bashrc
echo -e "# Hbase Env\nexport HBASE_HOME=/opt/hbase-2.2.4\n\
export PATH=\$HBASE_HOME/bin:\$HADOOP_HOME/bin:\$PATH" >> /root/.bashrc
source /root/.bashrc
# Verify the installs
echo "Verifying installs"
java -version
hadoop version
zkServer.sh version
# Configuration
# Disable the firewall
echo "Disabling the firewall"
systemctl disable firewalld
systemctl stop firewalld
echo "Installing hadoop"
# Hadoop
sed "s/{host}/$(cat /etc/hostname)/g" -i ./hadoop/slaves
sed "s|\${JAVA_HOME}|$JAVA_HOME|g" -i ./hadoop/hadoop-env.sh
sed "s/{host}/$(cat /etc/hostname)/g" -i ./hadoop/core-site.xml
\cp -rf ./hadoop /opt/hadoop-2.9.0/etc/
hdfs namenode -format
echo "开始安装zookeeper"
# Zookeeper
mkdir -p /opt/zookeeper_cache/data/zookeeper
mkdir -p /opt/zookeeper_cache/data/zookeeper_log
\cp -rf zoo.cfg /opt/apache-zookeeper-3.8.0-bin/conf/
# Hbase
echo "开始安装hbase"
echo -e "export JAVA_HOME=$JAVA_HOME\n\
export HBASE_CLASSPATH=/opt/hbase-2.2.4/conf\n\
export HBASE_MANAGES_ZK=false" >> ./conf/hbase-env.sh
sed "s/{host}/$(cat /etc/hostname)/g" -i ./conf/hbase-site.xml
sed "s/{host}/$(cat /etc/hostname)/g" -i ./conf/regionservers
\cp -rf ./conf/* /opt/hbase-2.2.4/conf/
echo "安装结束"

conf/hbase-env.sh (139)

@@ -0,0 +1,139 @@
#!/usr/bin/env bash
#
#/**
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
# Set environment variables here.
# This script sets variables multiple times over the course of starting an hbase process,
# so try to keep things idempotent unless you want to take an even deeper look
# into the startup scripts (bin/hbase, etc.)
# The java implementation to use. Java 1.8+ required.
# export JAVA_HOME=/usr/java/jdk1.8.0/
# Extra Java CLASSPATH elements. Optional.
# export HBASE_CLASSPATH=
# The maximum amount of heap to use. Default is left to JVM default.
# export HBASE_HEAPSIZE=1G
# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of
# offheap, set the value to "8G".
# export HBASE_OFFHEAPSIZE=1G
# Extra Java runtime options.
# Below are what we set by default. May only work with SUN JVM.
# For more on why as well as other possible settings,
# see http://hbase.apache.org/book.html#performance
export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC"
# Uncomment one of the below three options to enable java garbage collection logging for the server-side processes.
# This enables basic gc logging to the .out file.
# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
# This enables basic gc logging to its own file.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
# Uncomment one of the below three options to enable java garbage collection logging for the client processes.
# This enables basic gc logging to the .out file.
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
# This enables basic gc logging to its own file.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
# See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations
# needed setting up off-heap block caching.
# Uncomment and adjust to enable JMX exporting
# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
# NOTE: HBase provides an alternative JMX implementation to fix the random ports issue, please see JMX
# section in HBase Reference Guide for instructions.
# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
# export HBASE_REST_OPTS="$HBASE_REST_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10105"
# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
# Uncomment and adjust to keep all the Region Server pages mapped to be memory resident
#HBASE_REGIONSERVER_MLOCK=true
#HBASE_REGIONSERVER_UID="hbase"
# File naming hosts on which backup HMaster will run. $HBASE_HOME/conf/backup-masters by default.
# export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters
# Extra ssh options. Empty by default.
# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
# Where log files are stored. $HBASE_HOME/logs by default.
# export HBASE_LOG_DIR=${HBASE_HOME}/logs
# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"
# export HBASE_REST_OPTS="$HBASE_REST_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8074"
# A string representing this instance of hbase. $USER by default.
# export HBASE_IDENT_STRING=$USER
# The scheduling priority for daemon processes. See 'man nice'.
# export HBASE_NICENESS=10
# The directory where pid files are stored. /tmp by default.
# export HBASE_PID_DIR=/var/hadoop/pids
# Seconds to sleep between slave commands. Unset by default. This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HBASE_SLAVE_SLEEP=0.1
# Tell HBase whether it should manage its own instance of ZooKeeper or not.
# export HBASE_MANAGES_ZK=true
# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
# RFA appender. Please refer to the log4j.properties file to see more details on this appender.
# In case one needs to do log rolling on a date change, one should set the environment property
# HBASE_ROOT_LOGGER to "<DESIRED_LOG LEVEL>,DRFA".
# For example:
# HBASE_ROOT_LOGGER=INFO,DRFA
# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as
# DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context.
# Tell HBase whether it should include Hadoop's lib at startup;
# the default value is false, which means Hadoop's lib is included.
# export HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP="true"
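For reference, the install script appends these three settings to the end of this file (the JAVA_HOME value reflects the script's `/opt` layout, substituted at install time):
```shell
export JAVA_HOME=/opt/jdk1.8.0_202           # expanded from $JAVA_HOME by the installer
export HBASE_CLASSPATH=/opt/hbase-2.2.4/conf
export HBASE_MANAGES_ZK=false                # use the external ZooKeeper, not the bundled one
```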

conf/hbase-site.xml (48)

@@ -0,0 +1,48 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<configuration>
<property>
<name>hbase.rootdir</name>
<!-- 同hadoop配置文件core-site.xml中fs.defaultFS的值 -->
<value>hdfs://{host}:9000/hbase</value>
</property>
<!-- false是单机模式,true是分布式模式 -->
<property>
<name>hbase.cluster.distributed</name>
<!-- 当使用外置zookeeper时必须配置为true,仅在使用hbase自带zookeeper时配置为false -->
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<!-- zookeeper的数据目录,同zookeeper配置文件zoo.cfg中dataDir的值,启动hbase的用户需对此目录有读写权限 -->
<value>/opt/zookeeper_cache/data/zookeeper</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>{host}</value>
</property>
<property>
<name>hbase.unsafe.stream.capability.enforce</name>
<value>false</value>
</property>
</configuration>

conf/regionservers (1)

@@ -0,0 +1 @@
{host}

hadoop/core-site.xml (25)

@@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<!-- host为主机名,需要在/etc/hosts文件中配置映射,9000为hdfs访问端口 -->
<value>hdfs://{host}:9000</value>
</property>
</configuration>

hadoop/hadoop-env.sh (108)

@@ -0,0 +1,108 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set Hadoop-specific environment variables here.
# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.
# The java implementation to use.
export JAVA_HOME=${JAVA_HOME}
# The jsvc implementation to use. Jsvc is required to run secure datanodes
# that bind to privileged ports to provide authentication of data transfer
# protocol. Jsvc is not required if SASL is configured for authentication of
# data transfer protocol using non-privileged ports.
#export JSVC_HOME=${JSVC_HOME}
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
if [ "$HADOOP_CLASSPATH" ]; then
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
else
export HADOOP_CLASSPATH=$f
fi
done
# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
# Enable extra debugging of Hadoop's JAAS binding, used to set up
# Kerberos security.
# export HADOOP_JAAS_DEBUG=true
# Extra Java runtime options. Empty by default.
# For Kerberos debugging, an extended option set logs more information
# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS"
# set heap args when HADOOP_HEAPSIZE is empty
if [ "$HADOOP_HEAPSIZE" = "" ]; then
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
fi
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
# to provide authentication of data transfer protocol. This **MUST NOT** be
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
# Where log files are stored. $HADOOP_HOME/logs by default.
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
# Where log files are stored in the secure data environment.
#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
###
# HDFS Mover specific parameters
###
# Specify the JVM options to be used when starting the HDFS Mover.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_MOVER_OPTS=""
###
# Advanced Users Only!
###
# The directory where pid files are stored. /tmp by default.
# NOTE: this should be set to a directory that can only be written to by
# the user that will run the hadoop daemons. Otherwise there is the
# potential for a symlink attack.
export HADOOP_PID_DIR=${HADOOP_PID_DIR}
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER
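Note the `export JAVA_HOME=${JAVA_HOME}` line near the top of this file: the install script's `sed` replaces that placeholder with the concrete path, so after installation the line reads:
```shell
export JAVA_HOME=/opt/jdk1.8.0_202
```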

hadoop/hdfs-site.xml (40)

@@ -0,0 +1,40 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<!-- 副本数 -->
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<!-- 主namenode信息存储目录 -->
<name>dfs.namenode.name.dir</name>
<value>file:/opt/hadoop_cache/dn/dfs/name</value>
</property>
<property>
<!-- 从namenode信息存储目录 -->
<name>dfs.namenode.checkpoint.dir</name>
<value>file:/opt/hadoop_cache/dn/dfs/namesecondary</value>
</property>
<property>
<!-- datanode信息存储目录 -->
<name>dfs.datanode.data.dir</name>
<value>file:/opt/hadoop_cache/dn/dfs/data</value>
</property>
</configuration>

hadoop/slaves (1)

@@ -0,0 +1 @@
{host}

okhadoop.code-workspace (9)

@@ -0,0 +1,9 @@
{
"folders": [
{
"name": "okhadoop",
"path": "."
}
],
"settings": {}
}

zoo.cfg (39)

@@ -0,0 +1,39 @@
# The number of milliseconds of each tick
tickTime=2000
# the port for the ZooKeeper AdminServer (moved off the default 8080)
admin.serverPort=8887
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/zookeeper_cache/data/zookeeper
dataLogDir=/opt/zookeeper_cache/data/zookeeper_log
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
## Metrics Providers
#
# https://prometheus.io Metrics Exporter
#metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
#metricsProvider.httpHost=0.0.0.0
#metricsProvider.httpPort=7000
#metricsProvider.exportJvmInfo=true
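With this configuration in place, a quick health check after starting ZooKeeper (a sketch; the AdminServer answers on the non-default port 8887 set above):
```shell
zkServer.sh start
zkServer.sh status                         # should report standalone mode
curl http://localhost:8887/commands/ruok   # AdminServer "are you ok" endpoint
```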