1. Environment
OS: Red Hat Enterprise Linux Server release 7.0
All-in-one installation
2. Yum repositories
wget https://rdo.fedorapeople.org/rdo-release.rpm
rpm -ivh rdo-release.rpm
wget http://ftp.sjtu.edu.cn/fedora/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
rpm -ivh epel-release-7-5.noarch.rpm
wget -O /etc/yum.repos.d/epel-erlang.repo http://repos.fedorapeople.org/repos/peter/erlang/epel-erlang.repo
# make sure the baseurl lines in the two sections read as follows
vi /etc/yum.repos.d/epel-erlang.repo
[epel-erlang]
baseurl=https://repos.fedorapeople.org/repos/peter/erlang/epel-5/x86_64/
[epel-erlang-source]
baseurl=https://repos.fedorapeople.org/repos/peter/erlang/epel-5/SRPMS/
yum clean all;
yum makecache;
yum update;
3. Preparation
# By default yum deletes downloaded RPM packages after installation; setting keepcache=1 keeps them, which makes it easier to check package versions later
vi /etc/yum.conf
keepcache=1
vi /etc/hosts
127.0.0.1 rhel7
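# If other machines need to resolve this host by name, the controller's real IP (172.26.22.109 is the address used later in this guide) can also be mapped, for example:
172.26.22.109 rhel7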
4. Install KVM and libvirt, configure the network (SPICE for graphics)
yum install openssh-clients
yum install qemu-kvm
yum install libvirt
yum install tunctl
yum install bridge-utils   # provides brctl, used by the tap script below
yum install spice-vdagent
# Check that KVM installed correctly (the kvm modules should be loaded)
lsmod | grep kvm
# Edit the QEMU configuration
vi /etc/libvirt/qemu.conf
vnc_allow_host_audio = 1
cgroup_controllers = [ "cpu", "cpuacct", "devices", "memory" ]
clear_emulator_capabilities=0
user = "root"
group = "root"
cgroup_device_acl = [
"/dev/null", "/dev/full", "/dev/zero",
"/dev/random", "/dev/urandom",
"/dev/ptmx", "/dev/kvm", "/dev/kqemu",
"/dev/rtc", "/dev/hpet","/dev/net/tun",
]
vi /etc/selinux/config
# change SELINUX=enforcing to SELINUX=permissive
# apply the change immediately without a reboot
setenforce 0
systemctl enable libvirtd.service;
systemctl restart libvirtd.service;
# RHEL 7 does not ship ifconfig by default
yum install net-tools
# Create a virtual NIC (tap device) and attach it to the bridge at boot
vi /root/tap.sh
#!/bin/bash
tunctl -u root
brctl addif br0 tap0
ifconfig tap0 promisc up
chmod 777 /root/tap.sh
echo '/root/tap.sh' >> /etc/rc.local
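# On RHEL 7 /etc/rc.d/rc.local is not executable by default, so the script above may not run at boot unless it is made executable:
chmod +x /etc/rc.d/rc.local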
# Reconfigure the physical NIC for bridging
cd /etc/sysconfig/network-scripts/
cp ifcfg-eth0 ifcfg-br0
vi /etc/sysconfig/network-scripts/ifcfg-eth0
BOOTPROTO=manual
BRIDGE=br0
DEVICE=eth0
# keep the IP settings copied from ifcfg-eth0 and change/add the lines below
vi /etc/sysconfig/network-scripts/ifcfg-br0
NAME=br0
TYPE=Bridge
DEVICE=br0
# Open all TCP ports (suitable for a lab environment only)
iptables -I INPUT -p tcp -m multiport --dports 1:65535 -j ACCEPT
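# RHEL 7 enables firewalld by default; if the iptables rule above does not take effect, one option in a lab environment is to stop and disable firewalld entirely:
systemctl stop firewalld
systemctl disable firewalld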
# Reboot
reboot
5. Install the NTP and RabbitMQ services
yum install ntp
vi /etc/ntp.conf
server rhel7 iburst
systemctl enable ntpd.service
systemctl restart ntpd.service
yum install yum-plugin-priorities
yum install openstack-selinux
# RabbitMQ dependency
yum install erlang
yum install rabbitmq-server
systemctl enable rabbitmq-server.service
systemctl restart rabbitmq-server.service
# If this fails, try `systemctl stop rabbitmq-server.service` followed by `systemctl start rabbitmq-server.service`, then run the command again
rabbitmqctl change_password guest 123456
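# A quick check that the broker node is up:
rabbitmqctl status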
6. Install MySQL (MariaDB)
yum install mariadb mariadb-server MySQL-python
vi /etc/my.cnf
[mysqld]
key_buffer_size = 16M
bind-address = 0.0.0.0
default-storage-engine = innodb
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
systemctl enable mariadb.service;
systemctl start mariadb.service;
mysql_secure_installation
# Allow remote access
mysql -u root -p
GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '123456' WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '123456' WITH GRANT OPTION;
FLUSH PRIVILEGES;
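# Optional check that remote access works, connecting over TCP instead of the local socket (assumes the password set above):
mysql -h rhel7 -u root -p123456 -e "SHOW DATABASES;"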
7. Install the Identity service (keystone)
# In the MySQL client (mysql -u root -p):
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';
# Generate a random token for the keystone admin_token (example output shown)
openssl rand -hex 10
# ee0341733ea29917c41c
yum install openstack-keystone python-keystoneclient
vi /etc/keystone/keystone.conf
[DEFAULT]
# admin_token can be the random value generated with openssl above; this guide simply uses "admin"
admin_token = admin
verbose = True
log_dir = /var/log/keystone
[database]
connection = mysql://keystone:123456@rhel7/keystone
[token]
provider = keystone.token.providers.uuid.Provider
driver = keystone.token.persistence.backends.sql.Token
keystone-manage pki_setup --keystone-user keystone --keystone-group keystone;
chown -R keystone:keystone /var/log/keystone;
chown -R keystone:keystone /etc/keystone/ssl;
chmod -R o-rwx /etc/keystone/ssl;
su -s /bin/sh -c "keystone-manage db_sync" keystone
systemctl enable openstack-keystone.service
systemctl restart openstack-keystone.service
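# Optional: expired tokens accumulate in the database over time; the Juno install guide suggests purging them hourly with a cron job along these lines:
(crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/keystone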
export OS_SERVICE_TOKEN=admin;
export OS_SERVICE_ENDPOINT=http://rhel7:35357/v2.0;
keystone tenant-create --name admin;
keystone user-create --name admin --pass 123456;
keystone role-create --name admin;
keystone user-role-add --tenant admin --user admin --role admin;
keystone role-create --name _member_;
keystone user-role-add --tenant admin --user admin --role _member_;
keystone tenant-create --name demo --description "Demo Tenant";
keystone user-create --name demo --pass 123456;
keystone user-role-add --tenant demo --user demo --role _member_;
keystone tenant-create --name service --description "Service Tenant";
keystone service-create --name keystone --type identity --description "OpenStack Identity";
keystone endpoint-create --service-id $(keystone service-list | awk '/ identity / {print $2}') --publicurl http://rhel7:5000/v2.0 --internalurl http://rhel7:5000/v2.0 --adminurl http://rhel7:35357/v2.0 --region regionOne
unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT;
export OS_USERNAME=admin;
export OS_PASSWORD=123456;
export OS_TENANT_NAME=admin;
export OS_AUTH_URL=http://rhel7:35357/v2.0;
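# Section 8 below sources admin-openrc.sh; a minimal credentials file matching the exports above (written to /root here, adjust the path as needed) could be:
cat > /root/admin-openrc.sh <<'EOF'
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://rhel7:35357/v2.0
EOF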
# Verify
keystone user-list
8. Install the Image service (glance)
# In the MySQL client:
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '123456';
source admin-openrc.sh
keystone user-create --name glance --pass 123456;
keystone user-role-add --user glance --tenant service --role admin;
keystone service-create --name glance --type image --description "OpenStack Image Service";
keystone endpoint-create --service-id $(keystone service-list | awk '/ image / {print $2}') --publicurl http://rhel7:9292 --internalurl http://rhel7:9292 --adminurl http://rhel7:9292 --region regionOne;
yum install openstack-glance python-glanceclient
vi /etc/glance/glance-api.conf
[database]
connection = mysql://glance:123456@rhel7/glance
[keystone_authtoken]
auth_uri = http://rhel7:5000/v2.0
identity_uri = http://rhel7:35357
admin_tenant_name = service
admin_user = glance
admin_password = 123456
[paste_deploy]
flavor = keystone
[glance_store]
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[DEFAULT]
verbose = True
vi /etc/glance/glance-registry.conf
[database]
connection = mysql://glance:123456@rhel7/glance
[keystone_authtoken]
auth_uri = http://rhel7:5000/v2.0
identity_uri = http://rhel7:35357
admin_tenant_name = service
admin_user = glance
admin_password = 123456
[paste_deploy]
flavor = keystone
[DEFAULT]
verbose = True
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl restart openstack-glance-api.service openstack-glance-registry.service
# Verify
glance image-list
glance image-create --name "CentOS64" --file /data/CentOS64.qcow2 --disk-format qcow2 --container-format bare --is-public True --progress
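# If no local qcow2 image is available, a small CirrOS test image is a common alternative (download URL assumed to be the upstream cirros-cloud mirror):
wget http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img -P /data/
glance image-create --name "cirros-0.3.3" --file /data/cirros-0.3.3-x86_64-disk.img --disk-format qcow2 --container-format bare --is-public True --progress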
9. Install the Compute service (nova)
# In the MySQL client:
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '123456';
keystone user-create --name nova --pass 123456;
keystone user-role-add --user nova --tenant service --role admin;
keystone service-create --name nova --type compute --description "OpenStack Compute";
keystone endpoint-create --service-id $(keystone service-list | awk '/ compute / {print $2}') --publicurl http://rhel7:8774/v2/%\(tenant_id\)s --internalurl http://rhel7:8774/v2/%\(tenant_id\)s --adminurl http://rhel7:8774/v2/%\(tenant_id\)s --region regionOne
yum install openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient
vi /etc/nova/nova.conf
# Note: place the [database] section below [conductor]; do not put it at the very beginning or end of the file
[database]
connection = mysql://nova:123456@rhel7/nova
[DEFAULT]
rpc_backend = rabbit
rabbit_host = rhel7
rabbit_password = 123456
auth_strategy = keystone
my_ip = 172.26.22.109
vncserver_listen = 172.26.22.109
vncserver_proxyclient_address = 172.26.22.109
verbose = True
[keystone_authtoken]
auth_uri = http://rhel7:5000/v2.0
identity_uri = http://rhel7:35357
admin_tenant_name = service
admin_user = nova
admin_password = 123456
[glance]
host = rhel7
su -s /bin/sh -c "nova-manage db sync" nova
systemctl enable openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
# Verify
nova list
10. Install the compute node
yum install openstack-nova-compute sysfsutils
vi /etc/nova/nova.conf
[DEFAULT]
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 172.26.22.109
novncproxy_base_url = http://rhel7:6080/vnc_auto.html
egrep -c '(vmx|svm)' /proc/cpuinfo
# If the result is 0 (no hardware virtualization support), set virt_type in the [libvirt] section of nova.conf:
vi /etc/nova/nova.conf
[libvirt]
virt_type = qemu
# If the result is greater than 0:
vi /etc/nova/nova.conf
[libvirt]
virt_type = kvm
systemctl enable libvirtd.service openstack-nova-compute.service;
systemctl restart libvirtd.service;
systemctl restart openstack-nova-compute.service;
# Verify operation
nova service-list
11. Install the Networking service (neutron) server
# In the MySQL client:
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '123456';
keystone user-create --name neutron --pass 123456;
keystone user-role-add --user neutron --tenant service --role admin;
keystone service-create --name neutron --type network --description "OpenStack Networking";
keystone endpoint-create --service-id $(keystone service-list | awk '/ network / {print $2}') --publicurl http://rhel7:9696 --adminurl http://rhel7:9696 --internalurl http://rhel7:9696 --region regionOne
yum install openstack-neutron openstack-neutron-ml2 python-neutronclient which
keystone tenant-get service   # note the id of the service tenant; it is used for nova_admin_tenant_id below
vi /etc/neutron/neutron.conf
[database]
connection = mysql://neutron:123456@rhel7/neutron
[DEFAULT]
auth_strategy = keystone
rpc_backend=neutron.openstack.common.rpc.impl_kombu
rabbit_host=172.26.22.109
rabbit_password=123456
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://172.26.22.109:8774/v2
nova_admin_username = nova
# replace with the id of the service tenant (from keystone tenant-get service above)
nova_admin_tenant_id = adf8c51b227b47548551dd00c89a743a
nova_admin_password = 123456
nova_admin_auth_url = http://172.26.22.109:35357/v2.0
nova_region_name = regionOne
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
[keystone_authtoken]
auth_uri=http://172.26.22.109:5000
auth_host=172.26.22.109
auth_port=35357
auth_protocol=http
admin_tenant_name=service
admin_user=neutron
admin_password=123456
vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
vi /etc/nova/nova.conf
# network service (neutron) settings
network_api_class = nova.network.neutronv2.api.API
neutron_url = http://172.26.22.109:9696
neutron_auth_strategy = keystone
neutron_admin_tenant_name = service
neutron_admin_username = neutron
neutron_admin_password = 123456
neutron_admin_auth_url = http://172.26.22.109:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade juno" neutron
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service
systemctl enable neutron-server.service;
systemctl start neutron-server.service;
# Verify operation
neutron ext-list
12. Install the network node
vi /etc/sysctl.conf
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
sysctl -p
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
# Comment out all active lines in the [service_providers] section of /etc/neutron/neutron.conf
vi /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
use_namespaces = True
vi /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
use_namespaces = True
vi /etc/neutron/metadata_agent.ini
[DEFAULT]
auth_url = http://172.26.22.109:5000/v2.0
auth_region = regionOne
admin_tenant_name = service
admin_user = neutron
admin_password = 123456
nova_metadata_ip = 172.26.22.109
metadata_proxy_shared_secret = 123456
vi /etc/nova/nova.conf
[DEFAULT]
#metadata
service_neutron_metadata_proxy = true
neutron_metadata_proxy_shared_secret = 123456
vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ovs]
local_ip = 172.26.22.109
tunnel_type = gre
enable_tunneling = True
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
systemctl restart openstack-nova-api.service
systemctl enable openvswitch.service;
systemctl start openvswitch.service;
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-ovs-cleanup.service
systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
# Verify operation
neutron agent-list
13. Install the dashboard (horizon)
yum install openstack-dashboard httpd mod_wsgi memcached python-memcached
vi /etc/openstack-dashboard/local_settings
#OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = ['*']
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
setsebool -P httpd_can_network_connect on
chown -R apache:apache /usr/share/openstack-dashboard/static
systemctl enable httpd.service memcached.service;
systemctl start httpd.service memcached.service;
# Verify operation: open the dashboard in a browser
http://172.26.83.109/dashboard
14. Test the installation (launch an instance)
# Create an image
glance image-create --name "vm1" --file /home/linux-microcore-3.8.2.qcow2 --disk-format qcow2 --container-format bare --is-public True --progress
# Add the br-int and br-ex bridges
ovs-vsctl add-br br-int
ovs-vsctl add-br br-ex
# attach the external NIC (here eth1, the interface on the public/gateway network) to br-ex
ovs-vsctl add-port br-ex eth1
# Restart the networking agents so the changes take effect
systemctl restart neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service;
# Create the external network
neutron net-create ext-net --shared --router:external=True
#neutron subnet-create ext-net --name ext-subnet --allocation-pool start=192.168.100.101,end=192.168.100.200 --disable-dhcp --gateway 192.168.100.1 192.168.100.0/24
#neutron subnet-create ext-net --name ext-subnet --allocation-pool start=182.26.18.2,end=182.26.18.253 --disable-dhcp --gateway 182.26.18.1 182.26.18.0/24
# Create the internal (tenant) network
neutron net-create int-net
neutron subnet-create int-net --name int-subnet --dns-nameserver 202.99.96.68 --gateway 192.168.1.254 192.168.1.0/24
# Create a router and connect it to the external network
neutron router-create router1
neutron router-interface-add router1 int-subnet
#neutron router-gateway-set router1 ext-net
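# Before booting an instance, it may help to confirm the networks, subnets and router were created:
neutron net-list
neutron subnet-list
neutron router-list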
# Boot an instance
neutron net-list | awk '/ int-net / { print $2 }'   # get the net-id of int-net for the --nic option below
nova flavor-list
nova image-list
nova boot --flavor m1.small --image CentOS64 --nic net-id=486c5a56-4079-4c56-b225-fdc4a412895a --security-group default instance1
# or, equivalently, using the flavor and network UUIDs directly:
nova boot --flavor 17c03b75-69d8-4ca0-ac4c-c7e37c8c5297 --image CentOS64 --nic net-id=46e1aeec-e084-49d8-b8e9-bec3293d7475 --security-group default instance1
# Check the instance
nova list
15. SPICE support
yum install openstack-nova-spicehtml5proxy
rpm -ivh spice-html5-0.1.5-1.el6.noarch.rpm
vi /etc/nova/nova.conf
[DEFAULT]
vnc_enabled = False
[spice]
agent_enabled = True
enabled = True
html5proxy_base_url = http://172.26.83.109:6082/spice_auto.html
keymap = en-us
server_listen = 0.0.0.0
server_proxyclient_address = 172.26.22.109
systemctl enable openstack-nova-spicehtml5proxy.service;
systemctl restart openstack-nova-spicehtml5proxy.service;
systemctl restart httpd.service memcached.service
systemctl stop openstack-nova-novncproxy.service;
systemctl restart openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-compute.service;
yum install -y spice-server spice-protocol
systemctl disable openstack-nova-novncproxy.service;
systemctl stop openstack-nova-novncproxy.service;
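# To open a SPICE console for the instance created in section 14, the console URL can be requested with (assuming the instance is named instance1):
nova get-spice-console instance1 spice-html5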