The controller node runs glance, nova, messaging (MQ), apache, keystone, neutron, mysql, memcache, chrony, and epmd.
epmd is introduced below.
Reference: http://blog.csdn.net/mituan1234567/article/details/52767290
In "Erlang and OTP in Action", epmd is described as follows:

- epmd stands for Erlang Port Mapper Daemon.
- Whenever an Erlang node starts, it checks whether epmd is already running on the local machine; if not, the node starts epmd itself.
- epmd keeps track of every node running on the local machine and records the port assigned to each of them.
- When an Erlang node tries to communicate with a remote node, the local epmd contacts the epmd on the remote machine (by default on TCP port 4369) and asks whether a node with the given name exists there. If it does, the remote epmd replies with a port number, and communication with the remote node then proceeds directly over that port.
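As a quick sanity check (assuming RabbitMQ/Erlang is already present, e.g. after the installer below has run), you can ask the local epmd which Erlang nodes it has registered and confirm it is listening on the default port:
epmd -names                 # lists the Erlang nodes registered with the local epmd and their ports
ss -tnlp | grep 4369        # epmd should be listening on TCP port 4369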
The host has two NICs: one that can reach the Internet and must have an IP address, and one that must not be given an IP. Configure them as follows.
Configure the first interface as the management network interface (this is the one with Internet access):
# Modify the following entries
ONBOOT=yes
BOOTPROTO=static
# Add the following entries
IPADDR=192.168.0.20
NETMASK=255.255.255.0
GATEWAY=192.168.0.1
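Putting those pieces together, a minimal sketch of the complete management-interface file, assuming the device is called eno16777728 (a placeholder name; substitute your real interface):
# /etc/sysconfig/network-scripts/ifcfg-eno16777728   (example file name)
TYPE=Ethernet
DEVICE=eno16777728
ONBOOT=yes
BOOTPROTO=static
IPADDR=192.168.0.20
NETMASK=255.255.255.0
GATEWAY=192.168.0.1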
Configure the second NIC as the provider network interface:
Do not change the ``HWADDR`` and ``UUID`` keys.
The file should contain the following (replace INTERFACE_NAME with the actual interface name):
DEVICE=INTERFACE_NAME
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO="none"
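After editing both files, restart the network service and verify that the management interface carries 192.168.0.20 while the provider interface comes up without an address:
systemctl restart network
ip addr show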
rpm -q git
yum -y install git
git clone https://github.com/BillWang139967/openstack_install.git
vim openstack_install/etc/main-config.rc
#!/bin/bash
#
# Unattended installer for Openstack
#
# Sample Config
# version 1.0.1
#
#
# Use Private Repository
#
USE_PRIVATE_REPOS="no"
DEFAULT_PASS="itnihao"
# The hostname or IP address of a suitable more accurate (lower stratum) NTP server.
# The configuration supports multiple server keys.
# (Controller side)
NTP_SERVERS="0.centos.pool.ntp.org 1.centos.pool.ntp.org 2.centos.pool.ntp.org 3.centos.pool.ntp.org"
RABBIT_USER="openstack"
RABBIT_PASS="$DEFAULT_PASS"
MYSQL_PASS="$DEFAULT_PASS"
ADMIN_PASS="$DEFAULT_PASS"
DEMO_PASS="$DEFAULT_PASS"
SERVICE_PASSWORD="$DEFAULT_PASS"
METADATA_SECRET="$DEFAULT_PASS"
# SERVICE_TENANT_NAME="service"
# ADMIN_TENANT_NAME="admin"
# DEMO_TENANT_NAME="demo"
# INVIS_TENANT_NAME="invisible_to_admin"
# ADMIN_USER_NAME="admin"
# DEMO_USER_NAME="demo"
ADMIN_RC_FILE="admin-openrc"
DEMO_RC_FILE="demo-openrc"
################################################################################################################
#{{{account
#
# MARIADB INFORMATION
#
MYSQLDB_ADMIN="root"
MYSQLDB_PASSWORD="$DEFAULT_PASS"
MYSQLDB_PORT="3306"
# OPENSTACK SERVICES DATABASE ACCESS INFORMATION
#
# Here we define the database name, database user and database user password.
# In normal conditions, you should only need to change the password
#
# Keystone:
KEYSTONE_DBNAME="keystone"
KEYSTONE_DBUSER="keystone"
KEYSTONE_DBPASS="$DEFAULT_PASS"
# Glance
GLANCE_DBNAME="glance"
GLANCE_DBUSER="glance"
GLANCE_DBPASS="$DEFAULT_PASS"
# Neutron
NEUTRON_DBNAME="neutron"
NEUTRON_DBUSER="neutron"
NEUTRON_DBPASS="$DEFAULT_PASS"
# Nova
NOVA_DBNAME="nova"
NOVAAPI_DBNAME="nova_api"
NOVACELL_DBNAME="nova_cell0"
NOVA_DBUSER="nova"
NOVA_DBPASS="$DEFAULT_PASS"
# Cinder
CINDER_DBNAME="cinder"
CINDER_DBUSER="cinder"
CINDER_DBPASS="$DEFAULT_PASS"
# Swift
SWIFT_DBNAME="swift"
SWIFT_DBUSER="swift"
SWIFT_DBPASS="$DEFAULT_PASS"
#}}}
################################################################################################################
#{{{gluster config
#
#
# OPENSTACK CLUSTER INFORMATION
#
#
# Sample config - 1 controller + 2 computes
# CONTROLLER_NODES="controller"
# CONTROLLER_NODES_IP="10.1.0.10"
# COMPUTE_NODES="compute1 compute2"
# COMPUTE_NODES_IP="10.1.0.11 10.1.0.12"
#
# Sample config - All-In-One
# CONTROLLER_NODES="controller"
# CONTROLLER_NODES_IP="10.1.0.10"
# COMPUTE_NODES=""
# COMPUTE_NODES_IP=""
# Controller Nodes
# Specific Ip address - In this version, support 1 controller.
# It'll be map to each other like controller - 10.1.0.10
CONTROLLER_NODES="controller"          # controller hostname
CONTROLLER_NODES_IP="192.168.0.20"     # controller IP
# Compute Nodes:
# Specific Ip addresses
# It'll be map to each other like compute1 - 10.1.0.11
COMPUTE_NODES="compute"                # compute node hostname; these two entries tell the installer where to find the compute node
COMPUTE_NODES_IP="192.168.0.21"        # compute node IP
#}}}
################################################################################################################
#{{{service
#
# OPENSTACK KEYSTONE
# User declaration in Keystone
#
INSTALL_KEYSTONE="yes"
KEYSTONE_USER="keystone"
KEYSTONE_SERVICE="keystone"
KEYSTONE_PASS="$DEFAULT_PASS"
################################################################################################################
#
# OPENSTACK GLANCE
#
INSTALL_GLANCE="yes"
GLANCE_USER="glance"
GLANCE_PASS="$DEFAULT_PASS"
GLANCE_SERVICE="glance"
################################################################################################################
#
# OPENSTACK NOVA
#
INSTALL_NOVA="yes"
NOVA_USER="nova"
NOVA_SERVICE="nova"
NOVA_PASS="$DEFAULT_PASS"
###****************************************************ocata
PLACEMENT_USER="placement"
PLACEMENT_SERVICE="placement"
PLACEMENT_PASS="${DEFAULT_PASS}"
#
# Over-subscription CPU/RAM/DISK control. Please read in openstack manuals what this mean.
# You probably want to use those defaults... then again, maybe not.
RAM_ALLOCATION_RATIO="1.5"
CPU_ALLOCATION_RATIO="16.0"
DISK_ALLOCATION_RATIO="1.0"
#
# Libvirt Type configuration
#
VIRT_TYPE="kvm"
################################################################################################################
#
# OPENSTACK NEUTRON
#
INSTALL_NEUTRON="yes"
NEUTRON_USER="neutron"
NEUTRON_PASS="$DEFAULT_PASS"
NEUTRON_SERVICE="neutron"
#
# Choose ML2 Plugin
# "openvswitch" or "linuxbridge"
#
ML2_PLUGIN="linuxbridge"
#
# Choose network option
# "self-service" or "provider"
#
NETWORK_OPT="self-service"
# Also, you need to set up your network vlan ranges. Sample:
# network_vlan_ranges="provider:1:20,provider:20:200,physical02:100:300"
NETWORK_VLAN_RANGES="provider"
FLAT_NETWORKS="provider"
PROVIDER_INTERFACE="eno16777736"       # name of the second NIC (provider network interface); change to match your system
BRIDGE_MAPPINGS="provider:${PROVIDER_INTERFACE}"
PROVIDER_BRIDGE="br-provider"
# If you are going to use gre or vxlan, change the following variables according
# to your preferences
TUNNEL_ID_RANGES="1:100"
VNI_RANGES="110:1000"
################################################################################################################
#
# OPENSTACK HORIZON
#
INSTALL_HORIZON="yes"
TIMEZONE="Asia\/Shanghai"
#}}}
################################################################################################################
#{{{option
#
# OPENSTACK SWIFT
#
INSTALL_SWIFT="no"
SWIFT_USER="swift"
SWIFT_PASS="$DEFAULT_PASS"
SWIFT_SERVICE="swift"
# Swift replication information
PARTITION_POWER="5"
PARTITION_MIN_HOURS="1"
REPLICA_COUNT="3"
SWIFT_HASH="$DEFAULT_PASS"
#
# OPENSTACK CINDER
#
INSTALL_CINDER="no"
CINDER_USER="cinder"
CINDER_PASS="$DEFAULT_PASS"
CINDER_SERVICE="cinder"
CINDER_SERVICE_V2="cinderv2"
CINDER_SERVICE_V3="cinderv3"
# Depending of your volumen backend selection, set your default volume type. Our default is "lvm"
DEFAULT_VOLUME_TYPE="lvm"
# Our default is to use iscsi. Put here the iscsi server IP. It can be the one of the
# controller, if you are using a LVM "cinder-volumes" inside the controller.
CINDER_ISCSI_IP_ADDRESS="192.168.56.60"
CINDER_LVMNAME="cinder-volumes"
## ceph
RBD_POOL=volumes
RBD_USER=cinder
RBD_MAX_CLONE_DEPTH=5
RBD_STORE_CHUNK_SIZE=4
RADOS_CONNECT_TIMEOUT=-1
RBD_SECRET_UUID=a852df2b-55e1-4c1b-9fa2-61e77feaf30f
#}}}
###############################################################################################################
#
# END OF THE CONFIGURATION FILE
#
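Before launching the installer it is worth re-checking the few values that were changed from the sample defaults; one simple way to do so (variable names taken from the file above):
grep -E '^(DEFAULT_PASS|NTP_SERVERS|CONTROLLER_NODES|CONTROLLER_NODES_IP|COMPUTE_NODES|COMPUTE_NODES_IP|PROVIDER_INTERFACE|TIMEZONE)=' \
    openstack_install/etc/main-config.rc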
cd openstack_install/
/bin/bash main-installer.sh controller install
. /etc/openstack-control-script-config/admin-openrc
openstack compute service list
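If the controller installation succeeded, a few more routine checks can be run with the same credentials (standard openstack CLI commands, not specific to this installer):
openstack service list      # keystone, glance, nova, placement and neutron should be registered
openstack endpoint list     # each service should expose public/internal/admin endpoints
openstack image list        # glance should answer, even if the list is still empty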
vi tools/create_net.sh    # creates the network; edit this script before running it
# Start IP of the provider (physical) network allocation pool
START_IP_ADDRESS=192.168.0.60
# End IP of the provider (physical) network allocation pool
END_IP_ADDRESS=192.168.0.100
DNS_RESOLVER=8.8.8.8
# Gateway of the provider (physical) network
PROVIDER_NETWORK_GATEWAY=192.168.0.1
PROVIDER_NETWORK_CIDR=192.168.0.0/24
bash tools/create_net.sh provider
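To confirm that the script created the provider network and its subnet with the expected address range (standard openstack CLI commands; the exact names depend on what create_net.sh passes in):
openstack network list
openstack subnet list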
Compute node
yum -y install git
git clone https://github.com/BillWang139967/openstack_install.git
vim openstack_install/etc/main-config.rc
CONTROLLER_NODES="controller"          # controller hostname
CONTROLLER_NODES_IP="192.168.0.20"     # controller IP
COMPUTE_NODES="compute"                # compute node hostname
COMPUTE_NODES_IP="192.168.0.21"        # compute node IP
cd openstack_install/
bash main-installer.sh compute install
. /etc/openstack-control-script-config/admin-openrc
openstack network agent list
openstack compute service list
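As one further check, the new compute host should also appear as a hypervisor:
openstack hypervisor list    # the compute node should be listed here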
References
https://github.com/BillWang139967/openstack_install/wiki/ready#1-%E7%8E%AF%E5%A2%83%E8%A6%81%E6%B1%82
http://www.cnblogs.com/yaohong/p/7253222.html