- 浏览: 25414 次
- 性别:
- 来自: 深圳
文章分类
最新评论
#!/bin/bash
# Program:
#   Uninstall Ambari and all HDP components from every host of the cluster.
# History:
#   2014/01/13 - Ivan - 2862099249@qq.com - First release
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH

# Collect all cluster hostnames from /etc/hosts, skipping the first two
# lines (localhost / ::1).  awk tolerates any amount of whitespace between
# the IP and the hostname, so the entries no longer have to be separated
# by exactly one space (which the old `cut -d ' '` required).
hostList=$(tail -n +3 /etc/hosts | awk '{print $2}')

yumReposDir=/etc/yum.repos.d/
alterNativesDir=/etc/alternatives/
pingCount=5      # number of probes used for the reachability check below
logPre=TDP       # prefix for every log line this script prints

# Ask which node runs ambari-server; default to "master" on empty input.
read -p "Please input your master hostname: " master
master=${master:-"master"}

# Stop the Ambari server on the master node, then reset (wipe) its database.
ssh "$master" "ambari-server stop"
ssh "$master" "ambari-server reset"
# Walk every cluster host and strip Ambari/HDP from it.
for host in $hostList
do
    # Reachability probe: if all $pingCount pings report "Unreachable",
    # assume the /etc/hosts entry is wrong and skip this host.
    unPing=$(ping "$host" -c "$pingCount" | grep -c 'Unreachable')
    if [ "$unPing" == "$pingCount" ]; then
        echo -e "$logPre======>$host is Unreachable,please check '/etc/hosts' file"
        continue
    fi
    echo -e "$logPre======>$host deleting... \n"

    # 1) Delete the repo files: hdp.repo, HDP*.repo (HDP / HDP-UTILS) and
    #    ambari.repo.  The old standalone `ssh $host "cd $yumReposDir"` was a
    #    no-op (every ssh invocation starts a fresh shell), so absolute paths
    #    are used throughout.
    ssh "$host" "rm -rf $yumReposDir/hdp.repo $yumReposDir/HDP* $yumReposDir/ambari.repo"

    # 2) Remove the HDP-related packages -- one yum transaction per package,
    #    like the original, so one missing package cannot abort the others.
    for pkg in sqoop.noarch lzo-devel.x86_64 hadoop-libhdfs.x86_64 \
        rrdtool.x86_64 hbase.noarch pig.noarch lzo.x86_64 \
        ambari-log4j.noarch oozie.noarch oozie-client.noarch gweb.noarch \
        snappy-devel.x86_64 hcatalog.noarch python-rrdtool.x86_64 \
        nagios.x86_64 webhcat-tar-pig.noarch snappy.x86_64 \
        libconfuse.x86_64 webhcat-tar-hive.noarch ganglia-gmetad.x86_64 \
        extjs.noarch hive.noarch hadoop-lzo.x86_64 \
        hadoop-lzo-native.x86_64 hadoop-native.x86_64 hadoop-pipes.x86_64 \
        nagios-plugins.x86_64 hadoop.x86_64 zookeeper.noarch \
        hadoop-sbin.x86_64 ganglia-gmond.x86_64 libganglia.x86_64 \
        perl-rrdtool.x86_64 epel-release.noarch 'compat-readline5*' \
        fping.x86_64 perl-Crypt-DES.x86_64 exim.x86_64 ganglia-web.noarch \
        perl-Digest-HMAC.noarch perl-Digest-SHA1.x86_64 bigtop-jsvc.x86_64
    do
        ssh "$host" "yum remove -y $pkg"
    done

    # 3) Delete the alternatives symlinks.
    #    BUG FIX: the original ran `cd $alterNativesDir` and `rm -rf <name>`
    #    in two SEPARATE ssh sessions, so the rm actually executed in $HOME
    #    and the links were never removed.  Absolute paths fix that.
    for alt in hadoop-etc zookeeper-conf hbase-conf hadoop-log hadoop-lib \
        hadoop-default oozie-conf hcatalog-conf hive-conf hadoop-man \
        sqoop-conf hadoop-confone
    do
        ssh "$host" "rm -rf $alterNativesDir/$alt"
    done

    # 4) Delete the service users (and their home dirs: -r).  The duplicate
    #    "hcat" entry of the original list has been removed.
    for user in nagios hive ambari-qa hbase oozie hcat mapred hdfs \
        rrdcached zookeeper sqoop puppet flume tez yarn storm knox kafka \
        falcon atlas mahout spark
    do
        ssh "$host" "userdel -rf $user"
    done

    # 5) Delete the data/log/config directories the packages leave behind.
    for dir in /hadoop \
        /etc/hadoop /etc/hbase /etc/hcatalog /etc/hive /etc/ganglia \
        /etc/nagios /etc/oozie /etc/sqoop /etc/zookeeper /etc/kafka \
        /etc/falcon /etc/yarn /etc/spark /etc/flume /etc/mapred \
        /etc/ambari-qa /etc/tez \
        /var/run/hadoop /var/run/hbase /var/run/hive /var/run/ganglia \
        /var/run/nagios /var/run/oozie /var/run/zookeeper \
        /var/run/ambari-metrics-monitor /var/run/ambari-server \
        /var/run/hadoop-mapreduce /var/run/hadoop-yarn /var/run/spark \
        /var/log/hadoop /var/log/hbase /var/log/hive /var/log/nagios \
        /var/log/oozie /var/log/zookeeper /var/log/hadoop-mapreduce \
        /var/log/hadoop-yarn /var/log/spark \
        /var/nagios \
        /usr/lib/hadoop /usr/lib/hbase /usr/lib/hcatalog /usr/lib/hive \
        /usr/lib/oozie /usr/lib/sqoop /usr/lib/zookeeper \
        /var/lib/hive /var/lib/ganglia /var/lib/oozie /var/lib/zookeeper \
        /var/lib/hadoop-hdfs /var/lib/hadoop-mapreduce /var/lib/hadoop-yarn \
        /var/tmp/oozie \
        /tmp/hive /tmp/nagios /tmp/ambari-qa /tmp/sqoop-ambari-qa \
        /hadoop/oozie /hadoop/zookeeper /hadoop/mapred /hadoop/hdfs \
        /tmp/hadoop-hive /tmp/hadoop-nagios /tmp/hadoop-hcat \
        /tmp/hadoop-ambari-qa \
        /tmp/hsperfdata_hbase /tmp/hsperfdata_hive /tmp/hsperfdata_nagios \
        /tmp/hsperfdata_oozie /tmp/hsperfdata_zookeeper \
        /tmp/hsperfdata_mapred /tmp/hsperfdata_hdfs /tmp/hsperfdata_hcat \
        /tmp/hsperfdata_ambari-qa /tmp/hsperfdata_admin \
        /tmp/hsperfdata_spark
    do
        ssh "$host" "rm -rf $dir"
    done

    # 6) Finally remove the ambari packages themselves, the bundled
    #    PostgreSQL, and all remaining ambari state/log/config files.
    ssh "$host" "yum remove -y ambari-*"
    ssh "$host" "yum remove -y postgresql"
    ssh "$host" "rm -rf /var/lib/ambari* /var/log/ambari* /etc/ambari*"

    echo -e "$logPre======>$host is done! \n"
done
# NOTE(review): everything from here to the end of the following loop is a
# byte-for-byte duplicate of the script above -- almost certainly an
# accidental double paste.  It is kept (and fixed identically) rather than
# deleted; consider removing one copy.
# Program:
#   Uninstall Ambari and all HDP components from every host of the cluster.
# History:
#   2014/01/13 - Ivan - 2862099249@qq.com - First release
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH

# Collect all cluster hostnames from /etc/hosts, skipping the first two
# lines (localhost / ::1).  awk tolerates any amount of whitespace between
# the IP and the hostname, so the entries no longer have to be separated
# by exactly one space (which the old `cut -d ' '` required).
hostList=$(tail -n +3 /etc/hosts | awk '{print $2}')

yumReposDir=/etc/yum.repos.d/
alterNativesDir=/etc/alternatives/
pingCount=5      # number of probes used for the reachability check below
logPre=TDP       # prefix for every log line this script prints

# Ask which node runs ambari-server; default to "master" on empty input.
read -p "Please input your master hostname: " master
master=${master:-"master"}

# Stop the Ambari server on the master node, then reset (wipe) its database.
ssh "$master" "ambari-server stop"
ssh "$master" "ambari-server reset"
# Walk every cluster host and strip Ambari/HDP from it.
for host in $hostList
do
    # Reachability probe: if all $pingCount pings report "Unreachable",
    # assume the /etc/hosts entry is wrong and skip this host.
    unPing=$(ping "$host" -c "$pingCount" | grep -c 'Unreachable')
    if [ "$unPing" == "$pingCount" ]; then
        echo -e "$logPre======>$host is Unreachable,please check '/etc/hosts' file"
        continue
    fi
    echo -e "$logPre======>$host deleting... \n"

    # 1) Delete the repo files: hdp.repo, HDP*.repo (HDP / HDP-UTILS) and
    #    ambari.repo.  The old standalone `ssh $host "cd $yumReposDir"` was a
    #    no-op (every ssh invocation starts a fresh shell), so absolute paths
    #    are used throughout.
    ssh "$host" "rm -rf $yumReposDir/hdp.repo $yumReposDir/HDP* $yumReposDir/ambari.repo"

    # 2) Remove the HDP-related packages -- one yum transaction per package,
    #    like the original, so one missing package cannot abort the others.
    for pkg in sqoop.noarch lzo-devel.x86_64 hadoop-libhdfs.x86_64 \
        rrdtool.x86_64 hbase.noarch pig.noarch lzo.x86_64 \
        ambari-log4j.noarch oozie.noarch oozie-client.noarch gweb.noarch \
        snappy-devel.x86_64 hcatalog.noarch python-rrdtool.x86_64 \
        nagios.x86_64 webhcat-tar-pig.noarch snappy.x86_64 \
        libconfuse.x86_64 webhcat-tar-hive.noarch ganglia-gmetad.x86_64 \
        extjs.noarch hive.noarch hadoop-lzo.x86_64 \
        hadoop-lzo-native.x86_64 hadoop-native.x86_64 hadoop-pipes.x86_64 \
        nagios-plugins.x86_64 hadoop.x86_64 zookeeper.noarch \
        hadoop-sbin.x86_64 ganglia-gmond.x86_64 libganglia.x86_64 \
        perl-rrdtool.x86_64 epel-release.noarch 'compat-readline5*' \
        fping.x86_64 perl-Crypt-DES.x86_64 exim.x86_64 ganglia-web.noarch \
        perl-Digest-HMAC.noarch perl-Digest-SHA1.x86_64 bigtop-jsvc.x86_64
    do
        ssh "$host" "yum remove -y $pkg"
    done

    # 3) Delete the alternatives symlinks.
    #    BUG FIX: the original ran `cd $alterNativesDir` and `rm -rf <name>`
    #    in two SEPARATE ssh sessions, so the rm actually executed in $HOME
    #    and the links were never removed.  Absolute paths fix that.
    for alt in hadoop-etc zookeeper-conf hbase-conf hadoop-log hadoop-lib \
        hadoop-default oozie-conf hcatalog-conf hive-conf hadoop-man \
        sqoop-conf hadoop-confone
    do
        ssh "$host" "rm -rf $alterNativesDir/$alt"
    done

    # 4) Delete the service users (and their home dirs: -r).  The duplicate
    #    "hcat" entry of the original list has been removed.
    for user in nagios hive ambari-qa hbase oozie hcat mapred hdfs \
        rrdcached zookeeper sqoop puppet flume tez yarn storm knox kafka \
        falcon atlas mahout spark
    do
        ssh "$host" "userdel -rf $user"
    done

    # 5) Delete the data/log/config directories the packages leave behind.
    for dir in /hadoop \
        /etc/hadoop /etc/hbase /etc/hcatalog /etc/hive /etc/ganglia \
        /etc/nagios /etc/oozie /etc/sqoop /etc/zookeeper /etc/kafka \
        /etc/falcon /etc/yarn /etc/spark /etc/flume /etc/mapred \
        /etc/ambari-qa /etc/tez \
        /var/run/hadoop /var/run/hbase /var/run/hive /var/run/ganglia \
        /var/run/nagios /var/run/oozie /var/run/zookeeper \
        /var/run/ambari-metrics-monitor /var/run/ambari-server \
        /var/run/hadoop-mapreduce /var/run/hadoop-yarn /var/run/spark \
        /var/log/hadoop /var/log/hbase /var/log/hive /var/log/nagios \
        /var/log/oozie /var/log/zookeeper /var/log/hadoop-mapreduce \
        /var/log/hadoop-yarn /var/log/spark \
        /var/nagios \
        /usr/lib/hadoop /usr/lib/hbase /usr/lib/hcatalog /usr/lib/hive \
        /usr/lib/oozie /usr/lib/sqoop /usr/lib/zookeeper \
        /var/lib/hive /var/lib/ganglia /var/lib/oozie /var/lib/zookeeper \
        /var/lib/hadoop-hdfs /var/lib/hadoop-mapreduce /var/lib/hadoop-yarn \
        /var/tmp/oozie \
        /tmp/hive /tmp/nagios /tmp/ambari-qa /tmp/sqoop-ambari-qa \
        /hadoop/oozie /hadoop/zookeeper /hadoop/mapred /hadoop/hdfs \
        /tmp/hadoop-hive /tmp/hadoop-nagios /tmp/hadoop-hcat \
        /tmp/hadoop-ambari-qa \
        /tmp/hsperfdata_hbase /tmp/hsperfdata_hive /tmp/hsperfdata_nagios \
        /tmp/hsperfdata_oozie /tmp/hsperfdata_zookeeper \
        /tmp/hsperfdata_mapred /tmp/hsperfdata_hdfs /tmp/hsperfdata_hcat \
        /tmp/hsperfdata_ambari-qa /tmp/hsperfdata_admin \
        /tmp/hsperfdata_spark
    do
        ssh "$host" "rm -rf $dir"
    done

    # 6) Finally remove the ambari packages themselves, the bundled
    #    PostgreSQL, and all remaining ambari state/log/config files.
    ssh "$host" "yum remove -y ambari-*"
    ssh "$host" "yum remove -y postgresql"
    ssh "$host" "rm -rf /var/lib/ambari* /var/log/ambari* /etc/ambari*"

    echo -e "$logPre======>$host is done! \n"
done
发表评论
-
Canal相关理解
2017-12-29 16:18 459转载:http://www.importnew.com/251 ... -
kettle部署
2017-12-26 16:04 7191.将jmbi sql先上生产环境, 参考附件jmbi.sql ... -
crontab定时运行MR不行,手动shell可以执行成功问题排查过程
2017-12-26 15:48 858设置了定时任务,但MR任务没有执行。 第一步:手动执行she ... -
Flume+kafka+Spark Steaming demo2
2017-11-22 13:15 458一,flume配置 # Name the components ... -
Flume+Kafka+Spark Steaming demo
2017-11-21 15:21 441一.准备flume配置 a1.sources = r1 a1. ... -
HBase表导出成HDFS
2017-10-19 19:40 897导出步骤:在old cluster上/opt/cloudera ... -
zepplin实战
2017-10-13 16:10 361一句话介绍Zeppelin 以笔记(Note)的形式展示的数据 ... -
Azkaban安装
2017-10-10 18:32 905一.下载 https://github.com/azkaban ... -
KYLIN安装
2017-09-30 17:35 121. Kylin的一些概念 No. 关键字 解释 1 Kyl ... -
KYLIN安装
2017-09-30 17:40 3591. Kylin的一些概念 No. 关键字 解释 1 Kyl ... -
Logstash安装部署配置
2017-04-28 10:24 1022为了实现各业务平台日志信息采集到大数据平台hdf ... -
HBASE API
2017-04-18 11:01 471package org.jumore.test; impor ... -
linux ssh 相互密码登录
2017-02-22 13:40 4141.修改集群各机器名称 vim /etc/sysconfig/ ... -
Kettle Linux 安装部署
2017-02-15 17:20 1352一.安装JDK环境:根据自己的linux系统选择相应的版本,比 ... -
hadoop环境搭建
2017-01-23 17:31 351192.168.23.231 server1 192.168. ... -
环境安装
2017-01-17 16:26 391物理机部署分配 3台物理机上部署 Zookeeper 3个,F ... -
Storm demo
2016-12-19 15:50 439public class SentenceSpout exte ... -
运行Hadoop jar 第三方jar包依赖
2016-08-22 13:47 1016将自己编写的MapReduce程序打包成jar后,在运行 ha ... -
windows10下运行MR错误
2016-07-05 13:45 1654当在windows下运行MR程序时,会报各种错误。现把这次碰到 ... -
HBase问题
2016-06-16 17:02 3051.java.net.UnknownHostException ...
相关推荐
Ambari完全卸载脚本,参数加host列表文件,卸载前请确认脚本是否符合本地真实情况
ambari集群安装后不成功的反安装过程,经过积累的操作记录
ambari_HDP 完全卸载脚本,包括所有 Ambari 和 HDP 组件及数据库文件,日志文件。工具包由 3 个脚本组成,解压之后不要丢失其中的任何一个文件。使用方法:解压后,进入 uninstall_hdp_ambri 目录,运行: sudo ./...
执行命令: sh cleanAmbariNew.sh hostfile 其中,hostfile文件内容(机器地址): 可以reboot重启下,防止启用组件端口会被占用 注意:脚本中删除Postgres数据库(重装会造成数据丢失)
安装ambari集群后,若果需要卸载的同学,可以参考下。未必能直接用,但是借鉴是可以的。毕竟环境不同,我这边的环境可以用,你那边的环境未必可行。
通过ambari安装hadoop及组件有时候会失败,卸载清除非常麻烦,通过此脚本可以快速实现用户删除,目录删除,组件卸载,非常好用。
Apache Ambari是一款开源的工具,专为简化Hadoop集群的部署、管理和监控而设计。Ambari由Apache软件基金会维护,是大数据生态系统中的重要组件,尤其在HDP(Hortonworks Data Platform)中占据核心地位。这个"apache...
局域网下离线安装Ambari,卸载旧版本数据库,安装新的Mysql,一些组件的验证,详细叙述过程
Apache Ambari 是一个用于管理和监控 Hadoop 生态系统服务的开源工具,它提供了一个用户友好的 Web 界面和 REST API,使得集群的部署、配置、管理和维护变得更加简单。在 HDP(Hortonworks Data Platform)环境中,...
该资源为Ambari2.7.5预编译安装包,资源已放到百度网盘,可以先下载《ambari预编译安装包网盘下载地址.txt》获取网盘地址进行下载,资源内容:ambari-2.7.5.0-centos7.tar.gz 如有其他ambari相关资源需求可私信我
Apache Ambari 操作指南 摘要: Apache Ambari 是一个基于Web的管理工具,用于监控和管理大规模的分布式系统,如Hadoop集群。Ambari从集群节点和服务收集了大量的信息,并把它们表现为容易使用的,集中化的接口:...
Ambari-Doris增强型参数配置是针对Apache Ambari集成Doris的一种高级设置方法,旨在优化Doris在Ambari环境中的性能和稳定性。Apache Ambari是一款用于Hadoop集群管理和监控的开源工具,而Doris是阿里巴巴开源的一款...
ambari2.7.5集成HDP3,本身不带impala、kudu 故集成cloudera的impala、kudu安装方式 ambari插件安装方式。 解压放到/var/lib/ambari-server/resources/stacks/HDP/3.1/services/下
Apache Ambari 是一个用于管理和监控 Hadoop 集群的开源工具,它提供了一个直观的 Web UI 和 REST API。Ambari 2.7.4 版本是该工具的一个稳定版本,支持多种 Hadoop 组件的安装、配置和管理。而 Hue 是一个开源的 ...
**大数据Ambari之flume集成编译好的源码包** Apache Ambari 是一个用于管理和监控Hadoop集群的开源工具,它提供了直观的Web界面和RESTful API,使得安装、配置、管理Hadoop生态系统变得更加简单。Flume是Apache的一...
Ambari 安装指南中文翻译版 Ambari 是一个基于 Apache 的开源集群管理平台,能够帮助用户快速部署、管理和监控 Hadoop 集群。下面是根据给定文件信息生成的相关知识点: Apache Ambari 概述 Apache Ambari 是一个...
通过ambari server api 添加节点 可以脱离ambari dashboard
本文档主要介绍了使用 Ambari 搭建大数据平台的安装手册,对于大数据的安装和配置进行了详细的介绍。 大数据平台的基本概念: * 大数据是指无法在一定时间内用传统处理方式处理的巨量数据,包括结构化、非结构化和...