假设我们打算在 11 台主机上部署一个 TiDB 集群:
主机名IP部署服务数据盘挂载
host1 172.18.0.11 PD1/data
host2 172.18.0.12 PD2/data
host3 172.18.0.13 PD3/data
host4 172.18.0.14 TiKV1/data
host5 172.18.0.15 TiKV2/data
host6 172.18.0.16 TiKV3/data
host7 172.18.0.17 TiDB
host8 172.18.0.18 TiKV4/data
host9 172.18.0.19 pushgateway
host10 172.18.0.20 prometheus
host11 172.18.0.21 grafana
新建子网
# Create a user-defined bridge network with a /16 subnet so every
# container below can be pinned to a fixed IP (172.18.0.x), matching
# the host table above.
docker network create --subnet=172.18.0.0/16 shadownet
/data/tidbconf/pd.toml
[metric]
# prometheus client push interval, set "0s" to disable prometheus.
interval = "15s"
# prometheus pushgateway address, leaves it empty will disable prometheus.
address = "172.18.0.19:9091"
job="pd"
/data/tidbconf/tikv.toml
[metric]
# the Prometheus client push interval. Setting the value to 0s stops Prometheus client from pushing.
interval = "15s"
# the Prometheus pushgateway address. Leaving it empty stops Prometheus client from pushing.
address = "172.18.0.19:9091"
# the Prometheus client push job name. Note: A node id will automatically append, e.g., "tikv_1".
job = "tikv"
/data/tidbconf/tidb.toml
[status]
# If enable status report HTTP service.
report-status = true
# TiDB status port.
#status-port = 10080
# Prometheus pushgateway address, leaves it empty will disable prometheus push.
metrics-addr = "172.18.0.19:9091"
# Prometheus client push interval in second, set \"0\" to disable prometheus push.
metrics-interval = 15
job="tidb"
/data/prometheus/prometheus.yml
global:
scrape_interval: 15s
scrape_timeout: 10s
evaluation_interval: 15s
alerting:
alertmanagers:
- static_configs:
- targets: []
scheme: http
timeout: 10s
scrape_configs:
- job_name: prometheus
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- localhost:9090
- job_name: 'push-metrics'
static_configs:
- targets: ['172.18.0.19:9091']
# Start the three PD (Placement Driver) nodes that form the cluster's
# metadata / scheduling quorum.  Each container:
#   - gets a fixed IP on "shadownet" (matching the host table),
#   - mounts the host's /data so its data dir (/data/pdN) and the shared
#     config file /data/tidbconf/pd.toml are visible inside the container,
#   - advertises its own fixed IP for client (2379) and peer (2380) URLs,
#   - lists all three peers in --initial-cluster for initial bootstrap.
# NOTE(review): --privileged is broader than PD should need — confirm it
# is actually required in this environment.
docker run -d --name pd1 \
--network=shadownet --ip=172.18.0.11 \
--privileged \
-v /etc/localtime:/etc/localtime:ro \
-v /data:/data \
pingcap/pd:latest \
--name="pd1" \
--data-dir="/data/pd1" \
--client-urls="http://0.0.0.0:2379" \
--advertise-client-urls="http://172.18.0.11:2379" \
--peer-urls="http://0.0.0.0:2380" \
--advertise-peer-urls="http://172.18.0.11:2380" \
--initial-cluster="pd1=http://172.18.0.11:2380,pd2=http://172.18.0.12:2380,pd3=http://172.18.0.13:2380" \
--config="/data/tidbconf/pd.toml"
# pd2: identical to pd1 except for its name, IP, data dir and advertised URLs.
docker run -d --name pd2 \
--network=shadownet --ip=172.18.0.12 \
--privileged \
-v /etc/localtime:/etc/localtime:ro \
-v /data:/data \
pingcap/pd:latest \
--name="pd2" \
--data-dir="/data/pd2" \
--client-urls="http://0.0.0.0:2379" \
--advertise-client-urls="http://172.18.0.12:2379" \
--peer-urls="http://0.0.0.0:2380" \
--advertise-peer-urls="http://172.18.0.12:2380" \
--initial-cluster="pd1=http://172.18.0.11:2380,pd2=http://172.18.0.12:2380,pd3=http://172.18.0.13:2380" \
--config="/data/tidbconf/pd.toml"
# pd3: identical to pd1 except for its name, IP, data dir and advertised URLs.
docker run -d --name pd3 \
--network=shadownet --ip=172.18.0.13 \
--privileged \
-v /etc/localtime:/etc/localtime:ro \
-v /data:/data \
pingcap/pd:latest \
--name="pd3" \
--data-dir="/data/pd3" \
--client-urls="http://0.0.0.0:2379" \
--advertise-client-urls="http://172.18.0.13:2379" \
--peer-urls="http://0.0.0.0:2380" \
--advertise-peer-urls="http://172.18.0.13:2380" \
--initial-cluster="pd1=http://172.18.0.11:2380,pd2=http://172.18.0.12:2380,pd3=http://172.18.0.13:2380" \
--config="/data/tidbconf/pd.toml"
# Start the four TiKV storage nodes.  Each container:
#   - gets a fixed IP on "shadownet",
#   - raises the open-file limit (TiKV/RocksDB keeps many files open),
#   - mounts /data for its data dir (/data/tikvN) and the shared
#     /data/tidbconf/tikv.toml config,
#   - advertises its own fixed IP:20160 and registers with all three
#     PD endpoints via --pd.
docker run -d --name tikv1 \
--network=shadownet --ip=172.18.0.14 \
--privileged \
--ulimit nofile=1000000:1000000 \
-v /etc/localtime:/etc/localtime:ro \
-v /data:/data \
pingcap/tikv:latest \
--addr="0.0.0.0:20160" \
--advertise-addr="172.18.0.14:20160" \
--data-dir="/data/tikv1" \
--pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \
--config="/data/tidbconf/tikv.toml"
# tikv2: identical except for name, IP, advertised address and data dir.
docker run -d --name tikv2 \
--network=shadownet --ip=172.18.0.15 \
--privileged \
--ulimit nofile=1000000:1000000 \
-v /etc/localtime:/etc/localtime:ro \
-v /data:/data \
pingcap/tikv:latest \
--addr="0.0.0.0:20160" \
--advertise-addr="172.18.0.15:20160" \
--data-dir="/data/tikv2" \
--pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \
--config="/data/tidbconf/tikv.toml"
# tikv3: identical except for name, IP, advertised address and data dir.
docker run -d --name tikv3 \
--network=shadownet --ip=172.18.0.16 \
--privileged \
--ulimit nofile=1000000:1000000 \
-v /etc/localtime:/etc/localtime:ro \
-v /data:/data \
pingcap/tikv:latest \
--addr="0.0.0.0:20160" \
--advertise-addr="172.18.0.16:20160" \
--data-dir="/data/tikv3" \
--pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \
--config="/data/tidbconf/tikv.toml"
# tikv4: identical except for name, IP (.18 — .17 is reserved for TiDB),
# advertised address and data dir.
docker run -d --name tikv4 \
--network=shadownet --ip=172.18.0.18 \
--privileged \
--ulimit nofile=1000000:1000000 \
-v /etc/localtime:/etc/localtime:ro \
-v /data:/data \
pingcap/tikv:latest \
--addr="0.0.0.0:20160" \
--advertise-addr="172.18.0.18:20160" \
--data-dir="/data/tikv4" \
--pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \
--config="/data/tidbconf/tikv.toml"
# Start the (stateless) TiDB SQL node.  Ports are published to the Docker
# host: 4000 is the MySQL-protocol port, 10080 the HTTP status port
# (report-status is enabled in tidb.toml).  --store=tikv with --path
# pointing at the three PD endpoints connects it to the TiKV cluster.
docker run -d --name tidb1 \
-p 4000:4000 \
-p 10080:10080 \
--network=shadownet --ip=172.18.0.17 \
--privileged \
-v /etc/localtime:/etc/localtime:ro \
-v /data:/data \
pingcap/tidb:latest \
--store=tikv \
--path="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \
--config="/data/tidbconf/tidb.toml"
使用 MySQL 标准客户端连接 TiDB 测试
登录 host1 并确保已安装 MySQL 命令行客户端,执行:
$ mysql -h 127.0.0.1 -P 4000 -u root -D test
mysql> show databases;
+--------------------+
| Database |
+--------------------+
| INFORMATION_SCHEMA |
| PERFORMANCE_SCHEMA |
| mysql |
| test |
+--------------------+
4 rows in set (0.00 sec)
SET PASSWORD FOR 'root'@'%' = PASSWORD('test');
内外测试机
# Optional throwaway test container on the same network (kept commented out).
#docker run -it --network=shadownet --ip=172.18.0.26 centos:6.8 /bin/bash
# Pushgateway at 172.18.0.19:9091 — the address that pd.toml, tikv.toml
# and tidb.toml push their metrics to.
docker run --network=shadownet --ip=172.18.0.19 -d --name pushgateway prom/pushgateway
# Prometheus scrapes the pushgateway (see the 'push-metrics' job in
# /data/prometheus/prometheus.yml, mounted read-only); UI on host port 9090.
docker run --network=shadownet --ip=172.18.0.20 -d --privileged -v /data/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro \
-p 9090:9090 --name prometheus quay.io/prometheus/prometheus
# Grafana dashboard UI on host port 3000.
docker run --network=shadownet --ip=172.18.0.21 -d -p 3000:3000 --name grafana grafana/grafana
点击 Grafana Logo -> 点击 Dashboards -> 点击 Import -> 选择需要的 Dashboard 配置文件上传 -> 选择对应的 data source
测试清理命令
# Tear down every container created above.
# The original used `echo NAME | xargs docker stop | xargs docker rm`, a
# useless-use-of-echo/xargs pattern; stopping and then removing each
# container directly is equivalent and clearer.  `docker stop` attempts a
# graceful shutdown before the container is removed.
for c in pd1 pd2 pd3 tikv1 tikv2 tikv3 tikv4 tidb1 pushgateway prometheus grafana; do
  docker stop "$c" && docker rm "$c"
done
参考
https://pingcap.com/docs-cn/op-guide/monitor/
https://github.com/pingcap/tidb/blob/master/config/config.toml.example
https://prometheus.io/docs/prometheus/latest/querying/basics/
相关推荐
示例:使用Docker搭建基于Nginx+Tomcat的分布式部署架构。
Docker搭建Redis主从+哨兵模式集群 本文档主要介绍了使用Docker搭建Redis主从复制和哨兵模式集群的步骤。下面是详细的知识点总结: 一、Docker安装 * 下载Docker二进制文件(离线安装包),使用wget命令下载 * ...
docker-compose快速搭建 Prometheus+Grafana监控系统
现在网上有很多关于监控平台,不过大部分开源平台不够开放,想要汉化和自定义监控平台随意添加仪表板的话还是首选prometheus+grafana,汉化在本资源主要介绍的前端vue的汉化,prometheus主要用于采集微服务数据或者...
"基于Docker搭建Hadoop集群" 在本文中,我们将介绍如何基于Docker搭建Hadoop集群。Hadoop是大数据处理的常用工具,而Docker则是当前最流行的容器化技术。通过将Hadoop部署到Docker容器中,我们可以更方便地管理和...
Docker+Jenkins+GitLab+Maven+SpringBoot&SpringCloud;自动化构建
基于docker进行Grafana + prometheus实现服务监听(node-exporter 文件)
docker搭建zookeeper+solr集群.md
ubuntu 18 docker 搭建Prometheus+Grafana.rar
Docker+SSM+noVNC资料Docker+SSM+noVNC资料Docker+SSM+noVNC资料Docker+SSM+noVNC资料Docker+SSM+noVNC资料Docker+SSM+noVNC资料Docker+SSM+noVNC资料Docker+SSM+noVNC资料Docker+SSM+noVNC资料Docker+SSM+noVNC资料...
在容器化环境中,如Docker,Prometheus可以提供强大的监控能力,帮助运维人员实时了解容器的运行状态、资源利用率以及服务性能。 在监控Docker容器时,Prometheus的主要优势包括其灵活性、强大的查询语言PromQL以及...
基于docker-compose构建filebeat + Logstash +Elasticsearch+ kibana日志系统 对nginx日志进行正则切割字段。 https://www.jianshu.com/p/f7927591d530
1.Docker搭建RabbitMQ集群
docker+jenkins+tomcat+mysql+redis+nginx,实现jenkins自动构建部署。Java+maven是单独搭建的。
Docker搭建分布式集群nebula操作手册,亲自操作整理,无坑。
基于centos7.9系统使用docker安装部署Prometheus+Grafana方式实现对Li_docker-Prometheus-Grafana
docker容器中搭建kafka集群环境,kafka集群配置注意事项与优化
Docker 本身就是基于 Linux 的,所以首先以我的一台服务器做实验。虽然最后跑 wordcount 已经由于内存不足而崩掉,但是之前的过程还是可以参考的。 连接服务器 使用 ssh 命令连接远程服务器。 ssh root@[Your IP ...
work文档