
Setting up a TiDB cluster with Docker + Prometheus monitoring

 

Suppose we plan to deploy a TiDB cluster across 8 hosts, plus 3 more for the monitoring stack:

Hostname   IP            Service       Data mount
host1      172.18.0.11   PD1           /data
host2      172.18.0.12   PD2           /data
host3      172.18.0.13   PD3           /data
host4      172.18.0.14   TiKV1         /data
host5      172.18.0.15   TiKV2         /data
host6      172.18.0.16   TiKV3         /data
host7      172.18.0.17   TiDB          -
host8      172.18.0.18   TiKV4         /data
host9      172.18.0.19   pushgateway   -
host10     172.18.0.20   prometheus    -
host11     172.18.0.21   grafana       -

 

Create a subnet

docker network create --subnet=172.18.0.0/16 shadownet
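As an optional sanity check before starting any containers, inspecting the network should show the 172.18.0.0/16 range:

# print the subnet of the new bridge network
docker network inspect shadownet --format '{{range .IPAM.Config}}{{.Subnet}}{{end}}'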

 

/data/tidbconf/pd.toml

[metric]
# Prometheus client push interval; set "0s" to disable pushing.
interval = "15s"
# Prometheus pushgateway address; leaving it empty disables pushing.
address = "172.18.0.19:9091"
job = "pd"

 

/data/tidbconf/tikv.toml

[metric]
# Prometheus client push interval; setting the value to "0s" stops the client from pushing.
interval = "15s"
# Prometheus pushgateway address; leaving it empty disables pushing.
address = "172.18.0.19:9091"
# Prometheus client push job name. Note: a node ID is appended automatically, e.g. "tikv_1".
job = "tikv"

 

 

/data/tidbconf/tidb.toml

[status]
# Whether to enable the status report HTTP service.
report-status = true

# TiDB status port.
#status-port = 10080

# Prometheus pushgateway address; leaving it empty disables push.
metrics-addr = "172.18.0.19:9091"

# Prometheus client push interval in seconds; set to 0 to disable push.
metrics-interval = 15

job = "tidb"

 

 

 

/data/prometheus/prometheus.yml

global:

  scrape_interval: 15s

  scrape_timeout: 10s

  evaluation_interval: 15s

alerting:

  alertmanagers:

  - static_configs:

    - targets: []

    scheme: http

    timeout: 10s

scrape_configs:

- job_name: prometheus

  scrape_interval: 15s

  scrape_timeout: 10s

  metrics_path: /metrics

  scheme: http

  static_configs:

  - targets:

    - localhost:9090

- job_name: 'push-metrics'

  static_configs:

  - targets: ['172.18.0.19:9091']
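Before starting the Prometheus container, the config file can be validated with promtool, which ships in the same image (a quick sketch; the mount path matches the run command used further below):

docker run --rm \
  -v /data/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro \
  --entrypoint promtool \
  quay.io/prometheus/prometheus \
  check config /etc/prometheus/prometheus.yml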

 

 

 

docker run -d --name pd1 \

  --network=shadownet --ip=172.18.0.11 \

  --privileged \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/pd:latest \

  --name="pd1" \

  --data-dir="/data/pd1" \

  --client-urls="http://0.0.0.0:2379" \

  --advertise-client-urls="http://172.18.0.11:2379" \

  --peer-urls="http://0.0.0.0:2380" \

  --advertise-peer-urls="http://172.18.0.11:2380" \

  --initial-cluster="pd1=http://172.18.0.11:2380,pd2=http://172.18.0.12:2380,pd3=http://172.18.0.13:2380" \

  --config="/data/tidbconf/pd.toml" 

 

 

docker run -d --name pd2 \

  --network=shadownet --ip=172.18.0.12 \

  --privileged \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/pd:latest \

  --name="pd2" \

  --data-dir="/data/pd2" \

  --client-urls="http://0.0.0.0:2379" \

  --advertise-client-urls="http://172.18.0.12:2379" \

  --peer-urls="http://0.0.0.0:2380" \

  --advertise-peer-urls="http://172.18.0.12:2380" \

  --initial-cluster="pd1=http://172.18.0.11:2380,pd2=http://172.18.0.12:2380,pd3=http://172.18.0.13:2380" \

  --config="/data/tidbconf/pd.toml"

 

 

docker run -d --name pd3 \

  --network=shadownet --ip=172.18.0.13 \

  --privileged \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/pd:latest \

  --name="pd3" \

  --data-dir="/data/pd3" \

  --client-urls="http://0.0.0.0:2379" \

  --advertise-client-urls="http://172.18.0.13:2379" \

  --peer-urls="http://0.0.0.0:2380" \

  --advertise-peer-urls="http://172.18.0.13:2380" \

  --initial-cluster="pd1=http://172.18.0.11:2380,pd2=http://172.18.0.12:2380,pd3=http://172.18.0.13:2380" \

  --config="/data/tidbconf/pd.toml"

  

 

 

docker run -d --name tikv1 \

  --network=shadownet --ip=172.18.0.14 \

  --privileged \

  --ulimit nofile=1000000:1000000 \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tikv:latest \

  --addr="0.0.0.0:20160" \

  --advertise-addr="172.18.0.14:20160" \

  --data-dir="/data/tikv1" \

  --pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tikv.toml" 

  

  

 

 

docker run -d --name tikv2 \

  --network=shadownet --ip=172.18.0.15 \

  --privileged \

  --ulimit nofile=1000000:1000000 \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tikv:latest \

  --addr="0.0.0.0:20160" \

  --advertise-addr="172.18.0.15:20160" \

  --data-dir="/data/tikv2" \

  --pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tikv.toml" 

 

 

docker run -d --name tikv3 \

  --network=shadownet --ip=172.18.0.16 \

  --privileged \

  --ulimit nofile=1000000:1000000 \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tikv:latest \

  --addr="0.0.0.0:20160" \

  --advertise-addr="172.18.0.16:20160" \

  --data-dir="/data/tikv3" \

  --pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tikv.toml" 

  

 

docker run -d --name tikv4 \

  --network=shadownet --ip=172.18.0.18 \

  --privileged \

  --ulimit nofile=1000000:1000000 \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tikv:latest \

  --addr="0.0.0.0:20160" \

  --advertise-addr="172.18.0.18:20160" \

  --data-dir="/data/tikv4" \

  --pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tikv.toml"

 

 

docker run -d --name tidb1 \

  -p 4000:4000 \

  -p 10080:10080 \

  --network=shadownet --ip=172.18.0.17 \

  --privileged \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tidb:latest \

  --store=tikv \

  --path="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tidb.toml"

 

 

Connect to TiDB with a standard MySQL client to test

 

Log in to host1 (make sure the MySQL command-line client is installed) and run:

 

$ mysql -h 127.0.0.1 -P 4000 -u root -D test

mysql> show databases;

+--------------------+

| Database           |

+--------------------+

| INFORMATION_SCHEMA |

| PERFORMANCE_SCHEMA |

| mysql              |

| test               |

+--------------------+

4 rows in set (0.00 sec)

 

SET PASSWORD FOR 'root'@'%' = PASSWORD('test');
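After changing the password, reconnect using it ('test' is just the example value set above):

mysql -h 127.0.0.1 -P 4000 -u root -ptest -D test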

 

Test machine inside the subnet (optional)

#docker run  -it --network=shadownet --ip=172.18.0.26 centos:6.8 /bin/bash

 

docker run -d --name pushgateway --network=shadownet --ip=172.18.0.19 prom/pushgateway

docker run -d --name prometheus --network=shadownet --ip=172.18.0.20 \
  --privileged \
  -v /data/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro \
  -p 9090:9090 \
  quay.io/prometheus/prometheus

docker run -d --name grafana --network=shadownet --ip=172.18.0.21 -p 3000:3000 grafana/grafana
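To confirm metrics are flowing end to end, you can query the Pushgateway and the Prometheus targets API (a quick check, assuming the bridge IPs are reachable from the host, as on a Linux Docker host):

# series pushed by PD/TiKV/TiDB should appear on the Pushgateway
curl -s http://172.18.0.19:9091/metrics | grep -E '^(pd|tikv|tidb)_' | head
# the push-metrics job should be listed as "up" in Prometheus
curl -s http://127.0.0.1:9090/api/v1/targets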

 

 

Click the Grafana logo -> Dashboards -> Import -> upload the desired dashboard config file -> select the corresponding data source.
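Alternatively, the Prometheus data source can be registered through Grafana's HTTP API instead of the UI; a minimal sketch, assuming the default admin:admin credentials and the addresses above (the data source name "tidb-cluster" is just an example):

curl -s -u admin:admin -X POST http://127.0.0.1:3000/api/datasources \
  -H 'Content-Type: application/json' \
  -d '{"name":"tidb-cluster","type":"prometheus","url":"http://172.18.0.20:9090","access":"proxy"}'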

 

 

 

Cleanup commands for testing

docker stop pd1 pd2 pd3 tikv1 tikv2 tikv3 tikv4 tidb1
docker rm pd1 pd2 pd3 tikv1 tikv2 tikv3 tikv4 tidb1

 

docker stop pushgateway prometheus grafana
docker rm pushgateway prometheus grafana
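To wipe the test environment completely, the network and the on-disk data can also be removed (destructive; paths as used above):

docker network rm shadownet
rm -rf /data/pd1 /data/pd2 /data/pd3 /data/tikv1 /data/tikv2 /data/tikv3 /data/tikv4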

 

 

References

https://pingcap.com/docs-cn/op-guide/monitor/

https://github.com/pingcap/tidb/blob/master/config/config.toml.example

https://prometheus.io/docs/prometheus/latest/querying/basics/
