start-all.cmd --->set HADOOP_BIN_PATH=${hadoop_install}\sbin --->set HADOOP_LIBEXEC_DIR=${hadoop_install}\libexec ===>${hadoop_install}\libexec\hadoop-config.cmd --->set HADOOP_COMMON_DIR=share\hadoop\common --->set HADOOP_COMMON_LIB_JARS_DIR=share\hadoop\common\lib --->set HADOOP_COMMON_LIB_NATIVE=lib\native --->set HDFS_DIR=share\hadoop\hdfs --->set HDFS_LIB_JARS_DIR=share\hadoop\hdfs\lib --->set YARN_DIR=share\hadoop\yarn --->set YARN_LIB_JARS_DIR=share\hadoop\yarn\lib --->set MAPRED_DIR=share\hadoop\mapreduce --->set MAPRED_LIB_JARS_DIR=share\hadoop\mapreduce\lib --->判断是否存在%HADOOP_HOME%\share\hadoop\common\hadoop-common-*.jar --->set HADOOP_CONF_DIR=%HADOOP_HOME%\etc\hadoop ===>调用hadoop-env.cmd --->set JAVA_HOME=%JAVA_HOME% --->set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar --->set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS% --->set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS% --->set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS% --->set JAVA=%JAVA_HOME%\bin\java.exe --->设置java堆最大为1000M --->set JAVA_HEAP_MAX=-Xmx%HADOOP_HEAPSIZE%m 如果有,则覆盖上面java的堆参数。 --->set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\* --->设置日志路径、文件名、输出级别 --->set HADOOP_HDFS_HOME=%HADOOP_HOME% --->set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR% --->set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR%\* --->set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%\* --->set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR% --->set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\* --->set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%\* --->set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR% --->set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\* --->set 
CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%\* ===>sbin\start-dfs.cmd --config %HADOOP_CONF_DIR% --->set HADOOP_BIN_PATH=${hadoop_install}\sbin --->set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec ===>调用hdfs-config.cmd --->set HADOOP_BIN_PATH=%~dp0 --->set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec ===>调用%HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %* ===>……参考前方 --->start "Apache Hadoop Distribution" hadoop namenode ===>hadoop.cmd --->set HADOOP_BIN_PATH=%~dp0 --->call :updatepath %HADOOP_BIN_PATH% set path_to_add=%* set current_path_comparable=%path% set current_path_comparable=%current_path_comparable: =_% set current_path_comparable=%current_path_comparable:(=_% set current_path_comparable=%current_path_comparable:)=_% set path_to_add_comparable=%path_to_add% set path_to_add_comparable=%path_to_add_comparable: =_% set path_to_add_comparable=%path_to_add_comparable:(=_% set path_to_add_comparable=%path_to_add_comparable:)=_% --->set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec --->set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% ===>调用%HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %* ===>……参考前方 --->set hadoop-command=%1 --->set hadoop-command-arguments=%_arguments% //定义hdfs命令集合 --->set hdfscommands=namenode secondarynamenode datanode dfs dfsadmin fsck balancer fetchdt oiv dfsgroups //设置hdfs的核心命令 ===>call %HADOOP_HDFS_HOME%\bin\hdfs.cmd --->set HADOOP_BIN_PATH=%~dp0 --->set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% --->set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% ===>call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %* 参考前方 ===>call %HADOOP_CONF_DIR%\hadoop-env.cmd 参考前方 --->set hdfs-command=%1 --->call :make_command_arguments %* ---> set hdfs-command-arguments=%_hdfsarguments% --->如果命令无效goto print_usage --->set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath --->call :%hdfs-command% :namenode set 
CLASS=org.apache.hadoop.hdfs.server.namenode.NameNode set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_NAMENODE_OPTS% goto :eof :journalnode set CLASS=org.apache.hadoop.hdfs.qjournal.server.JournalNode set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_JOURNALNODE_OPTS% goto :eof :zkfc set CLASS=org.apache.hadoop.hdfs.tools.DFSZKFailoverController set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ZKFC_OPTS% goto :eof :secondarynamenode set CLASS=org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_SECONDARYNAMENODE_OPTS% goto :eof :datanode set CLASS=org.apache.hadoop.hdfs.server.datanode.DataNode set HADOOP_OPTS=%HADOOP_OPTS% -server %HADOOP_DATANODE_OPTS% goto :eof :dfs set CLASS=org.apache.hadoop.fs.FsShell set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :dfsadmin set CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :haadmin set CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin set CLASSPATH=%CLASSPATH%;%TOOL_PATH% set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :fsck set CLASS=org.apache.hadoop.hdfs.tools.DFSck set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :balancer set CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_BALANCER_OPTS% goto :eof :jmxget set CLASS=org.apache.hadoop.hdfs.tools.JMXGet goto :eof :classpath set CLASS=org.apache.hadoop.util.Classpath goto :eof :oiv set CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB goto :eof :oev set CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer goto :eof :fetchdt set CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher goto :eof :getconf set CLASS=org.apache.hadoop.hdfs.tools.GetConf goto :eof :groups set CLASS=org.apache.hadoop.hdfs.tools.GetGroups goto :eof :snapshotDiff set CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff goto :eof :lsSnapshottableDir set 
CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir goto :eof :cacheadmin set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin goto :eof :mover set CLASS=org.apache.hadoop.hdfs.server.mover.Mover set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS% goto :eof :storagepolicies set CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin goto :eof --->set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hdfs-command-arguments% // %JAVA% 就是调用java.exe --->call %JAVA% %java_arguments% --->goto:eof //定义mapred命令集合 --->set mapredcommands=pipes job queue mrgroups mradmin jobtracker tasktracker //设置mapredcommand的核心命令 ===>call %HADOOP_MAPRED_HOME%\bin\mapred.cmd %* --->set HADOOP_BIN_PATH=%~dp0 --->set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% --->set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% ===>call %DEFAULT_LIBEXEC_DIR%\mapred-config.cmd %* --->set HADOOP_BIN_PATH=%~dp0 --->set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% --->set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec --->call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %* 参考以前 ===>call %MAPRED_CONF_DIR%\mapred-env.cmd --->set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 --->set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA --->call :make_command_arguments %* --->set JAVA_HEAP_SIZE=-Xmx%MAPRED_HEAPSIZE%m --->set CLASSPATH=%HADOOP_CONF_DIR%;%MAPRED_CONF_DIR%;%CLASSPATH% --->set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\classes --->set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build --->set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\test\classes --->set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\tools --->set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\* --->set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\* --->set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\modules\* --->call :%mapred-command% %mapred-command-arguments% :classpath set CLASS=org.apache.hadoop.util.Classpath goto :eof :job set CLASS=org.apache.hadoop.mapred.JobClient set 
HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :queue set CLASS=org.apache.hadoop.mapred.JobQueueClient set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :sampler set CLASS=org.apache.hadoop.mapred.lib.InputSampler set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :historyserver set CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer set HADOOP_OPTS=%HADOOP_OPTS% -Dmapred.jobsummary.logger=%HADOOP_JHS_LOGGER% %HADOOP_JOB_HISTORYSERVER_OPTS% if defined HADOOP_JOB_HISTORYSERVER_HEAPSIZE ( set JAVA_HEAP_MAX=-Xmx%HADOOP_JOB_HISTORYSERVER_HEAPSIZE%m ) goto :eof :distcp set CLASS=org.apache.hadoop.tools.DistCp set CLASSPATH=%CLASSPATH%;%TOOL_PATH% set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :archive set CLASS=org.apache.hadoop.tools.HadoopArchives set CLASSPATH=%CLASSPATH%;%TOOL_PATH% set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% :hsadmin set CLASS=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% :pipes goto not_supported :mradmin goto not_supported :jobtracker goto not_supported :tasktracker goto not_supported :groups goto not_supported --->set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %mapred-command-arguments% --->call %JAVA% %java_arguments% --->goto:eof //classpath既不是hdfs命令也不是mapred命令 classpath ---> set corecommands=fs version jar checknative distcp daemonlog archive classpath credential key //定义核心命令集合 fs文件系统命令 version查看版本 jar运行jar包中的类 checknative检查本地类库 distcp批量拷贝,archive归档 --->call :%hadoop-command% set CLASS=%hadoop-command% :fs set CLASS=org.apache.hadoop.fs.FsShell :jar set CLASS=org.apache.hadoop.util.RunJar :checknative set CLASS=org.apache.hadoop.util.NativeLibraryChecker :distcp set CLASS=org.apache.hadoop.tools.DistCp set CLASSPATH=%CLASSPATH%;%TOOL_PATH% :daemonlog set CLASS=org.apache.hadoop.log.LogLevel :archive set CLASS=org.apache.hadoop.tools.HadoopArchives set CLASSPATH=%CLASSPATH%;%TOOL_PATH% :classpath set 
CLASS=org.apache.hadoop.util.Classpath :credential set CLASS=org.apache.hadoop.security.alias.CredentialShell :key set CLASS=org.apache.hadoop.crypto.key.KeyShell --->set path=%PATH%;%HADOOP_BIN_PATH% --->set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% --->set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% //调用java程序,java.exe --->call %JAVA% %JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hadoop-command-arguments% ===>start "Apache Hadoop Distribution" hadoop.cmd datanode 参考以前 //yarn框架的启动脚本 ===>sbin\start-yarn.cmd --config %HADOOP_CONF_DIR% --->set HADOOP_BIN_PATH=%~dp0 --->set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% --->set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% ===>call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %* --->set HADOOP_BIN_PATH=%~dp0 --->set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% --->set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% ===>call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %* 参考以前 --->set YARN_CONF_DIR=%2 --->set YARN_CONF_DIR=%HADOOP_CONF_DIR% --->set YARN_SLAVES=%YARN_CONF_DIR%\%2 --->set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf ---> ---> ---> ---> ---> ===>start "Apache Hadoop Distribution" yarn.cmd resourcemanager --->set HADOOP_BIN_PATH=%~dp0 --->set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% --->set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% ===>call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %* 参考以前 ===>call %YARN_CONF_DIR%\yarn-env.cmd --->set HADOOP_YARN_USER=%yarn% --->set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m --->set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs --->set YARN_LOGFILE=yarn.log --->set YARN_POLICYFILE=hadoop-policy.xml --->set YARN_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console --->set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR% --->set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR% --->set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE% --->set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE% --->set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME% --->set YARN_OPTS=%YARN_OPTS% 
-Dyarn.id.str=%YARN_IDENT_STRING% --->set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME% --->set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER% --->set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER% --->set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH% --->set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE% --->set yarn-command=%1 --->call :make_command_arguments %* 将第一个参数设置为yarn命令 set yarn-command-arguments=%_yarnarguments% --->set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m --->set CLASSPATH=%HADOOP_CONF_DIR%;%YARN_CONF_DIR%;%CLASSPATH% --->set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\* 在share\hadoop\yarn --->set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\* 在share\hadoop\yarn\lib --->set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^ application applicationattempt cluster container node queue logs daemonlog historyserver ^ timelineserver classpath :classpath set CLASS=org.apache.hadoop.util.Classpath goto :eof :rmadmin set CLASS=org.apache.hadoop.yarn.client.cli.RMAdminCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% goto :eof :application set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% set yarn-command-arguments=%yarn-command% %yarn-command-arguments% goto :eof :applicationattempt set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% set yarn-command-arguments=%yarn-command% %yarn-command-arguments% goto :eof :cluster set CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% goto :eof :container set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% set yarn-command-arguments=%yarn-command% %yarn-command-arguments% goto :eof :node set CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% goto :eof :queue set 
CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% goto :eof :resourcemanager set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\rm-config\log4j.properties set CLASS=org.apache.hadoop.yarn.server.resourcemanager.ResourceManager set YARN_OPTS=%YARN_OPTS% %YARN_RESOURCEMANAGER_OPTS% if defined YARN_RESOURCEMANAGER_HEAPSIZE ( set JAVA_HEAP_MAX=-Xmx%YARN_RESOURCEMANAGER_HEAPSIZE%m ) goto :eof :historyserver @echo DEPRECATED: Use of this command to start the timeline server is deprecated. 1>&2 @echo Instead use the timelineserver command for it. 1>&2 set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS% if defined YARN_HISTORYSERVER_HEAPSIZE ( set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m ) goto :eof :timelineserver set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\timelineserver-config\log4j.properties set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer set YARN_OPTS=%YARN_OPTS% %HADOOP_TIMELINESERVER_OPTS% if defined YARN_TIMELINESERVER_HEAPSIZE ( set JAVA_HEAP_MAX=-Xmx%YARN_TIMELINESERVER_HEAPSIZE%m ) goto :eof :nodemanager set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties set CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager set YARN_OPTS=%YARN_OPTS% -server %HADOOP_NODEMANAGER_OPTS% if defined YARN_NODEMANAGER_HEAPSIZE ( set JAVA_HEAP_MAX=-Xmx%YARN_NODEMANAGER_HEAPSIZE%m ) goto :eof :proxyserver set CLASS=org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer set YARN_OPTS=%YARN_OPTS% %HADOOP_PROXYSERVER_OPTS% if defined YARN_PROXYSERVER_HEAPSIZE ( set JAVA_HEAP_MAX=-Xmx%YARN_PROXYSERVER_HEAPSIZE%m ) goto :eof :version set CLASS=org.apache.hadoop.util.VersionInfo set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% goto :eof :jar set CLASS=org.apache.hadoop.util.RunJar set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% 
goto :eof :logs set CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% goto :eof :daemonlog set CLASS=org.apache.hadoop.log.LogLevel set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS% goto :eof --->set java_arguments=%JAVA_HEAP_MAX% %YARN_OPTS% -classpath %CLASSPATH% %CLASS% %yarn-command-arguments% --->call %JAVA% %java_arguments% 调用java.exe ===>start "Apache Hadoop Distribution" yarn.cmd nodemanager 参考以前 ===>@rem start "Apache Hadoop Distribution" yarn.cmd proxyserver 参考以前
- 浏览: 211695 次
- 性别:
- 来自: 上海
文章分类
最新评论
-
masuweng:
写的太好了,
spring security oauth 2 客户端模式研究 sparklr2 tonr2 -
qq979418391:
楼主,继续翻译啊,英文文档看起来真费劲楼主神威!!!
Supporting Multiple Screens 翻译 支持各种屏幕(上) -
jibaole:
[color=green][/color]
数据权限设计 -
jackyrong:
楼主有更好的方法了么?
数据权限设计 -
a6230589:
这么好的东西居然没人顶,赞一个,期待楼主的下半篇翻译。
Supporting Multiple Screens 翻译 支持各种屏幕(上)
发表评论
-
hdfs和mapreduce启动的进程名称
2017-07-18 20:28 461hdfs namenode 通过ip:50070图形化访 ... -
各虚拟机中配置hadoop的模块
2017-07-07 22:34 478ubuntu-64-10 namenode ubuntu- ... -
mapreduce初始二
2017-04-09 23:34 617如果一个文件大于hadoop指定的block大小(默认是1 ... -
hadoop启动脚本分析
2017-04-04 21:19 2569start-all.cmd setlocal en ... -
Call From s0/192.168.56.140 to s0:8020 failed on connection exception
2017-04-03 21:12 985ubuntu@s0:~$ hadoop fs -ls / ... -
(转)HDFS 原理、架构与特性介绍
2017-04-03 20:33 380本文主要讲述 HDFS原理-架构、副本机制、HDFS负载均衡 ... -
hadoop2.7.3 win10部署
2017-03-27 16:48 1144在win10下配置hadoop时 jd ... -
hadoop安装部署
2017-03-25 23:02 311hadoop 主要是分name节点 辅助name节点 数据节 ...
相关推荐
在学习hadoop启动脚本过程中记录的,有一定的参考价值,值得一看!
HadoopHA集群 批量启动脚本HadoopHA集群 批量启动脚本HadoopHA集群 批量启动脚本HadoopHA集群 批量启动脚本
hadoop 高可用启动脚本,运行启动zookeeper集群和hadoop ha集群
启动集群脚本,私聊免费发。上传只为防丢失。以备后用
标题 "用python编写nagios hadoop监控脚本" 暗示了本文将探讨如何使用Python编程语言来创建Nagios监控系统针对Hadoop集群的监控脚本。Nagios是一款广泛使用的开源网络监控系统,它能检测各种IT基础设施的状态,包括...
7. **启动服务**:通过脚本启动Hadoop和Zookeeper服务,检查各个节点的运行状态,确保所有服务正常运行。 8. **测试集群**:运行简单的Hadoop命令(如`hadoop fs -ls /`)和MapReduce程序,验证集群安装是否成功。 ...
jps判断hadoop启动是否成功;分别对master和slave进行了判断。jps不是hadoop的什么命令,是java的命令,所以直接执行就行了。
docker中启动大数据脚本
停止集群麻烦,所以写的脚本。私我可以免费发给你。上传只为自己以后用,防止丢失。
7. 启动Hadoop服务:通过start-dfs.sh和start-yarn.sh命令启动Hadoop的各个组件。 8. 设置SSH免密登录:为了集群间通信,脚本可能会包含一个步骤来配置所有节点间的SSH无密码登录。 9. 验证安装:最后,脚本可能会...
脚本搭建hadoop集群 可以自定义主机名和IP地址 可以自定义安装jdk和hadoop(格式为*tar.gz) 注意事项 1、安装完jdk和hadoop请手动source /etc/profile 刷新环境变量 2测试脚本环境为centOS6,其他操作系统会有些...
hadoop运行脚本run.sh
"hadoop启动日志"这个主题涉及到的是Hadoop集群启动过程中的日志记录,这对于系统管理员进行故障排查、性能优化以及理解Hadoop运行状态至关重要。日志文件通常包含了丰富的信息,如服务启动时间、初始化步骤、依赖...
《Hadoop入门脚本WordCount详解》 在大数据处理领域,Hadoop是一个不可或缺的重要工具,它的分布式计算模型为海量数据的处理提供了强大的支持。而WordCount则是Hadoop入门的经典示例,通过它,我们可以深入理解...
在Hadoop生态系统中,Shell脚本扮演着至关重要的角色,特别是在大数据处理和集群管理中。这些脚本通常用于自动化任务,如数据迁移、作业调度、集群监控等。下面我们将深入探讨Hadoop Shell脚本的相关知识点。 一、...
统一启动集群中各个节点
Apache Hadoop软件库是一个框架,它允许使用简单的编程模型跨计算机群集分布式处理大型数据集。它旨在从单个服务器扩展到数千台机器,每台机器提供本地计算和存储。该库本身不是依靠硬件来提供高可用性,而是设计...
2. "hadoop2的HA配置一键运行脚本startall.sh":这是实际的配置脚本,用于自动化执行Hadoop HA的安装和启动过程。脚本中可能包含了初始化Hadoop集群、配置HA参数、启动和检查服务状态等命令。 3. "教程重要说明.txt...
【Hadoop环境部署自动化Shell脚本】是一种高效的方法,用于快速搭建Hadoop集群,无论是用于学习还是开发。本文档提供了一个详细的脚本,涵盖了从Java环境配置到Hadoop集群的完全分布式安装的所有步骤,旨在降低...
它可能涵盖了如何运行上述脚本、编辑配置文件和启动/停止Hadoop服务等内容。 在配置Hadoop伪分布式环境时,你需要: 1. 首先,运行`install_jdk.sh`脚本安装JDK。 2. 然后,解压`hadoop-2.8.1.tar.gz`到你选择的...