OpenStack multi-host quantum-gre deployment notes [grizzly]

(2013-08-23 20:20:53)

Environment Preparation

Create three virtual machines: a controller node (10.1.101.24, 2 vCPUs / 2048 MB), a compute1 node (10.1.101.25, 2 vCPUs / 2048 MB), and a compute2 node (10.1.101.26, 2 vCPUs / 2048 MB).


Add the Grizzly repository on all three nodes and update the system:

echo deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main \

 >> /etc/apt/sources.list.d/grizzly.list

apt-get update && apt-get install ubuntu-cloud-keyring

apt-get update

 

Controller Node

Network interface configuration:

cat /etc/network/interfaces

# This file describes the network interfaces available on your system

# and how to activate them. For more information, see interfaces(5).

# The loopback network interface

auto lo

iface lo inet loopback

 

# External Network

auto eth0

iface eth0 inet static

address 10.1.101.24

netmask 255.255.255.0

gateway 10.1.101.254

#dns-nameservers 192.168.1.3

dns-nameservers 10.1.101.51

 

auto eth1

iface eth1 inet static

address 192.168.1.24

netmask 255.255.255.0

 

auto eth2

iface eth2 inet static

address 192.168.2.24

netmask 255.255.255.0

 

Hosts file configuration:

cat /etc/hosts

127.0.0.1      localhost

192.168.1.24   controller

192.168.1.25   compute1

192.168.1.26   compute2

 

# The following lines are desirable for IPv6 capable hosts

::1    ip6-localhost ip6-loopback

fe00::0 ip6-localnet

ff00::0 ip6-mcastprefix

ff02::1 ip6-allnodes

ff02::2 ip6-allrouters

 

Set the local hostname:

cat /etc/hostname

controller

 

Disable reverse path filtering:

vim /etc/sysctl.conf

net.ipv4.conf.all.rp_filter = 0

net.ipv4.conf.default.rp_filter = 0

 

Restart networking:

/etc/init.d/networking restart


Apply and verify the sysctl settings:

sysctl -e -p /etc/sysctl.conf

 

Install and configure the NTP service:

apt-get install -y ntp

sed -i 's/server ntp.ubuntu.com/ \

server ntp.ubuntu.com \

server 127.127.1.0 \

fudge 127.127.1.0 stratum 10/g' /etc/ntp.conf

 

Restart the NTP service:

service ntp restart
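
To confirm the daemon is syncing (a quick check of my own, not in the original notes):

ntpq -p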

 

Set environment variables:

cat > /root/novarc << EOF

export OS_TENANT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=password

export MYSQL_PASS=password

export SERVICE_PASSWORD=password

export RABBIT_PASSWORD=password

export OS_AUTH_URL="http://localhost:5000/v2.0/"

export SERVICE_ENDPOINT="http://localhost:35357/v2.0"

export SERVICE_TOKEN=mutil_host

export MASTER="$(/sbin/ifconfig eth0 \

      | awk '/inet addr/ {print $2}' | cut -f2 -d ":")"

export LOCAL_IP="$(/sbin/ifconfig eth1 \

      | awk '/inet addr/ {print $2}' | cut -f2 -d ":")"

EOF

 

Load the environment variables:

cat /root/novarc >> /etc/profile

source /etc/profile

 

Preseed the MySQL root password:

cat << MYSQL_PRESEED | debconf-set-selections

mysql-server-5.5 mysql-server/root_password password $MYSQL_PASS

mysql-server-5.5 mysql-server/root_password_again password $MYSQL_PASS

mysql-server-5.5 mysql-server/start_on_boot boolean true

MYSQL_PRESEED

 

Install MySQL:

apt-get -y install mysql-server python-mysqldb curl

 

Allow connections from remote hosts:

sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf

sed -i '44 i skip-name-resolve' /etc/mysql/my.cnf  (optional)

 

Restart the MySQL service:

service mysql restart

 

Create the OpenStack databases:

mysql -uroot -p$MYSQL_PASS << EOF

CREATE DATABASE nova;

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '$MYSQL_PASS';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '$MYSQL_PASS';

CREATE DATABASE glance;

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '$MYSQL_PASS';

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '$MYSQL_PASS';

CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '$MYSQL_PASS';

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '$MYSQL_PASS';

CREATE DATABASE cinder;

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '$MYSQL_PASS';

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '$MYSQL_PASS';

CREATE DATABASE quantum;

GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'%' IDENTIFIED BY '$MYSQL_PASS';

GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'localhost' IDENTIFIED BY '$MYSQL_PASS';

FLUSH PRIVILEGES;

EOF
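
A quick sanity check (my addition) that all five databases exist:

mysql -uroot -p$MYSQL_PASS -e "SHOW DATABASES;" | grep -E 'nova|glance|keystone|cinder|quantum'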

 

Install the RabbitMQ service:

apt-get -y install rabbitmq-server

Change the guest user's password to password and restart the service:

rabbitmqctl change_password guest $RABBIT_PASSWORD

service rabbitmq-server restart

 

Install the Keystone service:

apt-get install -y keystone python-keystone python-keystoneclient

 

Configure it:

sed -i -e " s/# admin_token = ADMIN/admin_token = $SERVICE_TOKEN/g; \

s/# bind_host = 0.0.0.0/bind_host = 0.0.0.0/g;\

s/# public_port = 5000/public_port = 5000/g;\

s/# admin_port = 35357/admin_port = 35357/g;\

s/# compute_port = 8774/compute_port = 8774/g;\

s/# verbose = True/verbose = True/g; \

s/# idle_timeout/idle_timeout/g" /etc/keystone/keystone.conf

 

sed -i '/connection =.*/{s|sqlite:///.*|mysql://'"keystone"':'"$MYSQL_PASS"'@'"$MASTER"'/keystone|g}' \

     /etc/keystone/keystone.conf

 

Restart the Keystone service and sync the database:

service keystone restart

keystone-manage db_sync

 

Create the Keystone data import script (see attachment):

vim keystone-data.sh

#!/bin/bash

# Modify these variables as needed

ADMIN_PASSWORD=${ADMIN_PASSWORD:-password}

SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD}

DEMO_PASSWORD=${DEMO_PASSWORD:-$ADMIN_PASSWORD}

export OS_SERVICE_TOKEN="mutil_host"

export OS_SERVICE_ENDPOINT="http://localhost:35357/v2.0"

SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}

#

MYSQL_USER=keystone

MYSQL_DATABASE=keystone

MYSQL_HOST=localhost

MYSQL_PASSWORD=${MYSQL_PASS}

#

KEYSTONE_REGION=RegionOne

KEYSTONE_HOST=$(/sbin/ifconfig eth1 | awk '/inet addr/{print $2}' | cut -f2 -d ":")

 

# Shortcut function to get a newly generated ID

function get_field() {

   while read data; do

       if [ "$1" -lt 0 ]; then

           field="(\$(NF$1))"

       else

           field="\$$(($1 + 1))"

       fi

       echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print$field}"

   done

}

 

# Tenants

ADMIN_TENANT=$(keystone tenant-create --name=admin | grep " id " | get_field 2)

DEMO_TENANT=$(keystone tenant-create --name=demo | grep " id " | get_field 2)

SERVICE_TENANT=$(keystone tenant-create --name=$SERVICE_TENANT_NAME | grep " id " | get_field 2)

 

# Users

ADMIN_USER=$(keystone user-create --name=admin --pass="$ADMIN_PASSWORD" --email=admin@domain.com | grep " id " | get_field 2)

DEMO_USER=$(keystone user-create --name=demo --pass="$DEMO_PASSWORD" --email=demo@domain.com --tenant-id=$DEMO_TENANT | grep " id " | get_field 2)

NOVA_USER=$(keystone user-create --name=nova --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=nova@domain.com | grep " id " | get_field 2)

GLANCE_USER=$(keystone user-create --name=glance --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=glance@domain.com | grep " id " | get_field 2)

QUANTUM_USER=$(keystone user-create --name=quantum --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=quantum@domain.com | grep " id " | get_field 2)

CINDER_USER=$(keystone user-create --name=cinder --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=cinder@domain.com | grep " id " | get_field 2)

 

# Roles

ADMIN_ROLE=$(keystone role-create --name=admin | grep " id " | get_field 2)

MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2)

 

# Add Roles to Users in Tenants

keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $QUANTUM_USER --role-id $ADMIN_ROLE

keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE

keystone user-role-add --tenant-id $DEMO_TENANT --user-id $DEMO_USER --role-id $MEMBER_ROLE

 

# Create services

COMPUTE_SERVICE=$(keystone service-create --name nova --type compute --description 'OpenStack Compute Service' | grep " id " | get_field 2)

VOLUME_SERVICE=$(keystone service-create --name cinder --type volume --description 'OpenStack Volume Service' | grep " id " | get_field 2)

IMAGE_SERVICE=$(keystone service-create --name glance --type image --description 'OpenStack Image Service' | grep " id " | get_field 2)

IDENTITY_SERVICE=$(keystone service-create --name keystone --type identity --description 'OpenStack Identity' | grep " id " | get_field 2)

EC2_SERVICE=$(keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service' | grep " id " | get_field 2)

NETWORK_SERVICE=$(keystone service-create --name quantum --type network --description 'OpenStack Networking service' | grep " id " | get_field 2)

 

# Create endpoints

keystone endpoint-create --region $KEYSTONE_REGION --service-id $COMPUTE_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':8774/v2/$(tenant_id)s' --adminurl 'http://'"$KEYSTONE_HOST"':8774/v2/$(tenant_id)s' --internalurl 'http://'"$KEYSTONE_HOST"':8774/v2/$(tenant_id)s'

keystone endpoint-create --region $KEYSTONE_REGION --service-id $VOLUME_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':8776/v1/$(tenant_id)s' --adminurl 'http://'"$KEYSTONE_HOST"':8776/v1/$(tenant_id)s' --internalurl 'http://'"$KEYSTONE_HOST"':8776/v1/$(tenant_id)s'

keystone endpoint-create --region $KEYSTONE_REGION --service-id $IMAGE_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':9292' --adminurl 'http://'"$KEYSTONE_HOST"':9292' --internalurl 'http://'"$KEYSTONE_HOST"':9292'

keystone endpoint-create --region $KEYSTONE_REGION --service-id $IDENTITY_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':5000/v2.0' --adminurl 'http://'"$KEYSTONE_HOST"':35357/v2.0' --internalurl 'http://'"$KEYSTONE_HOST"':5000/v2.0'

keystone endpoint-create --region $KEYSTONE_REGION --service-id $EC2_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':8773/services/Cloud' --adminurl 'http://'"$KEYSTONE_HOST"':8773/services/Admin' --internalurl 'http://'"$KEYSTONE_HOST"':8773/services/Cloud'

keystone endpoint-create --region $KEYSTONE_REGION --service-id $NETWORK_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':9696/' --adminurl 'http://'"$KEYSTONE_HOST"':9696/' --internalurl 'http://'"$KEYSTONE_HOST"':9696/'

 

Run the keystone-data.sh script:

bash keystone-data.sh
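
To verify the data loaded (my own check; it relies on the SERVICE_TOKEN and SERVICE_ENDPOINT exported earlier):

keystone tenant-list

keystone user-list

keystone endpoint-list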

 

Install the Glance components:

apt-get -y install glance

 

Configure:

sed -i -e " s/%SERVICE_TENANT_NAME%/service/g;\

s/%SERVICE_USER%/glance/g;s/%SERVICE_PASSWORD%/$SERVICE_PASSWORD/g; \

" /etc/glance/glance-api.conf /etc/glance/glance-registry.conf

 

sed -i '/sql_connection =.*/{s|sqlite:///.*|mysql://'"glance"':'"$MYSQL_PASS"'@'"$MASTER"'/glance|g}' \

      /etc/glance/glance-registry.conf /etc/glance/glance-api.conf

 

sed -i " s/notifier_strategy = noop/notifier_strategy =rabbit/g;\

       s/rabbit_password = guest/rabbit_password = $RABBIT_PASSWORD/g;"\

      /etc/glance/glance-api.conf

 

cat << EOF >> /etc/glance/glance-api.conf

flavor = keystone+cachemanagement

EOF

 

cat << EOF >> /etc/glance/glance-registry.conf

flavor = keystone

EOF

 

Restart the Glance services and sync the database:

service glance-api restart

service glance-registry restart

glance-manage db_sync

 

Download an image to test the Glance service:

wget https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img

 

Add the cirros image:

glance add name=cirros is_public=true container_format=bare \

disk_format=qcow2 </root/cirros-0.3.0-x86_64-disk.img

 

List the images:

glance index

ID                                   Name                 Disk Format  Container Format  Size
------------------------------------ -------------------- ------------ ----------------- --------
932fad9e-3f93-4e76-ab58-2fad4c4acd5f cirros-0.3.0-x86_64  qcow2        bare              9761280

 

 

Install the Cinder components:

apt-get install -y cinder-api cinder-scheduler cinder-volume iscsitarget \

   open-iscsi iscsitarget-dkms python-cinderclient linux-headers-`uname -r`

 

Enable iscsitarget and start the services:

sed -i 's/false/true/g' /etc/default/iscsitarget

service iscsitarget start

service open-iscsi start

 

Configure Cinder:

cat > /etc/cinder/cinder.conf << EOF

[DEFAULT]

rootwrap_config = /etc/cinder/rootwrap.conf

sql_connection = mysql://cinder:$MYSQL_PASS@$MASTER:3306/cinder

iscsi_helper = ietadm

volume_group = cinder-volumes

rabbit_password = $RABBIT_PASSWORD

logdir=/var/log/cinder

verbose=true

auth_strategy = keystone

EOF
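
Note that the config above points cinder-volume at an LVM volume group named cinder-volumes, which these notes never create. If it does not exist yet, something like the following would set it up (assuming a spare disk at /dev/sdb, which is hypothetical here):

pvcreate /dev/sdb

vgcreate cinder-volumes /dev/sdb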

 

sed -i -e " s/%SERVICE_TENANT_NAME%/service/g;\

    s/%SERVICE_USER%/cinder/g;s/%SERVICE_PASSWORD%/$SERVICE_PASSWORD/g; " \

    /etc/cinder/api-paste.ini

 

Sync the Cinder database and restart the services:

cinder-manage db sync

service cinder-api restart

service cinder-scheduler restart

service cinder-volume restart
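
As a smoke test (my addition, assuming the cinder-volumes group exists), create and list a 1 GB volume:

cinder create --display-name test 1

cinder list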

 

 

Install the Quantum components:

apt-get install quantum-server quantum-plugin-openvswitch

 

Edit quantum.conf as follows:

vim /etc/quantum/quantum.conf

[DEFAULT]

verbose = True

allow_overlapping_ips = True

rabbit_host = 192.168.1.24

rabbit_password = password

[keystone_authtoken]

admin_tenant_name = service

admin_user = quantum

admin_password = password

signing_dir = /var/lib/quantum/keystone-signing

 

Edit ovs_quantum_plugin.ini:

vim /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini

[DEFAULT]

core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2

rabbit_host = 192.168.1.24

rabbit_password = password

host = 192.168.1.24

[DATABASE]

sql_connection = mysql://quantum:password@192.168.1.24/quantum

reconnect_interval = 2

[OVS]

tenant_network_type = gre

enable_tunneling = True

tunnel_id_ranges = 1:1000

[AGENT]

polling_interval = 2

[SECURITYGROUP]

firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver


Restart the Quantum server:

/etc/init.d/quantum-server restart
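
A quick check (my addition) that the server is up and can reach the database; the list should simply come back empty at this point:

quantum net-list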

 

Install the Nova components:

apt-get install -y nova-api nova-cert nova-common nova-conductor \

   nova-scheduler python-nova python-novaclient nova-consoleauth novnc \

nova-novncproxy

 

Edit api-paste.ini:

sed -i -e " s/127.0.0.1/$MASTER/g;s/%SERVICE_TENANT_NAME%/service/g; \

          s/%SERVICE_USER%/nova/g; s/%SERVICE_PASSWORD%/$SERVICE_PASSWORD/g;" \

          /etc/nova/api-paste.ini

 

Edit nova.conf as follows:

cat /etc/nova/nova.conf

[DEFAULT]

dhcpbridge_flagfile=/etc/nova/nova.conf

dhcpbridge=/usr/bin/nova-dhcpbridge

logdir=/var/log/nova

state_path=/var/lib/nova

lock_path=/var/lock/nova

force_dhcp_release=True

iscsi_helper=tgtadm

libvirt_use_virtio_for_bridges=True

connection_type=libvirt

root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf

verbose=True

ec2_private_dns_show_ip=True

api_paste_config=/etc/nova/api-paste.ini

volumes_path=/var/lib/nova/volumes

enabled_apis=ec2,osapi_compute,metadata

 

sql_connection=mysql://nova:password@localhost/nova

rabbit_password=password

auth_strategy=keystone

 

# Cinder

volume_api_class=nova.volume.cinder.API

osapi_volume_listen_port=5900

 

#scheduler

scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter

 

# Networking

network_api_class=nova.network.quantumv2.api.API

quantum_url=http://192.168.1.24:9696

quantum_auth_strategy=keystone

quantum_admin_tenant_name=service

quantum_admin_username=quantum

quantum_admin_password=password

quantum_admin_auth_url=http://192.168.1.24:35357/v2.0

libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver

linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver

host=192.168.1.24

service_quantum_metadata_proxy=True

 

# SecurityGroups

#firewall_driver=nova.virt.firewall.NoopFirewallDriver

#security_group_api=quantum

 

# Metadata

quantum_metadata_proxy_shared_secret=password

service_quantum_metadata_proxy=true

metadata_listen = 192.168.1.24

metadata_listen_port = 8775

 

# Glance

glance_api_servers=192.168.1.24:9292

image_service=nova.image.glance.GlanceImageService

 

# novnc

novnc_enable=true

novncproxy_base_url=http://10.1.101.24:6080/vnc_auto.html

vncserver_proxyclient_address=10.1.101.24

vncserver_listen=0.0.0.0

 

同步数据库并重启相关服务

nova-manage db sync

service nova-api restart

service nova-cert restart

service nova-consoleauth restart

service nova-scheduler restart

service nova-novncproxy restart

service nova-conductor restart

 

Check the services (make sure nova-cert, nova-consoleauth, nova-scheduler, and nova-conductor are all up):

nova-manage service list

Binary            Host        Zone      Status   State  Updated_At
nova-cert         controller  internal  enabled  :-)    2013-08-20 02:53:49
nova-consoleauth  controller  internal  enabled  :-)    2013-08-20 02:53:46
nova-scheduler    controller  internal  enabled  :-)    2013-08-20 02:53:41
nova-conductor    controller  internal  enabled  :-)    2013-08-20 02:53:43

 

Install the Dashboard components:

apt-get -y install apache2 libapache2-mod-wsgi openstack-dashboard memcached python-memcache
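
After installation, the dashboard should be reachable at http://10.1.101.24/horizon (the default path for the Ubuntu package); log in as admin with the password set above.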

 

 

 

Compute Node

Make sure the Grizzly repository has been added and the system updated, as in the environment preparation above.

 

Network interface configuration:

cat /etc/network/interfaces

# This file describes the network interfaces available on your system

# and how to activate them. For more information, see interfaces(5).

 

# The loopback network interface

auto lo

iface lo inet loopback

 

auto eth0

iface eth0 inet static

address 10.1.101.25

netmask 255.255.255.0

gateway 10.1.101.254

#dns-nameservers 192.168.1.3

dns-nameservers 10.1.101.51

 

auto eth1

iface eth1 inet static

address 192.168.1.25

netmask 255.255.255.0

 

auto eth2

iface eth2 inet static

address 192.168.2.25

netmask 255.255.255.0

 

 

Edit the hosts file:

vim /etc/hosts

127.0.0.1      localhost

192.168.1.24   controller

192.168.1.25   compute1

192.168.1.26   compute2

 

# The following lines are desirable for IPv6 capable hosts

::1    ip6-localhost ip6-loopback

fe00::0 ip6-localnet

ff00::0 ip6-mcastprefix

ff02::1 ip6-allnodes

ff02::2 ip6-allrouters

 

Set the hostname:

vim /etc/hostname

compute1

 

Enable IP forwarding:

vim /etc/sysctl.conf

net.ipv4.ip_forward = 1

net.ipv4.conf.all.forwarding = 1

 

Restart networking and verify:

/etc/init.d/networking restart

sysctl -e -p /etc/sysctl.conf

 

Set environment variables:

cat > /root/novarc << EOF

export OS_TENANT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=password

export MYSQL_PASS=password

export SERVICE_PASSWORD=password

export RABBIT_PASSWORD=password

export SERVICE_TOKEN=mutil_host

export CONTROLLER_IP=192.168.1.24

export MASTER="$(/sbin/ifconfig eth0 | awk '/inet addr/{print $2}' | cut -f2 -d ":")"

export LOCAL_IP="$(/sbin/ifconfig eth1 | awk '/inet addr/ {print $2}' | cut -f2 -d ":")"

export OS_AUTH_URL="http://192.168.1.24:5000/v2.0/"

export SERVICE_ENDPOINT="http://192.168.1.24:35357/v2.0"

EOF

 

Load the environment variables:

cat /root/novarc >> /etc/profile

source /etc/profile

 

Install, configure, and restart the NTP service:

apt-get install -y ntp

sed -i -e " s/server ntp.ubuntu.com/server $CONTROLLER_IP/g" /etc/ntp.conf

service ntp restart

 

Install Open vSwitch:

apt-get install openvswitch-datapath-source

module-assistant auto-install openvswitch-datapath

apt-get install openvswitch-switch openvswitch-brcompat

 

Enable brcompat in the Open vSwitch configuration:

sed -i 's/# BRCOMPAT=no/BRCOMPAT=yes/g' /etc/default/openvswitch-switch

echo 'brcompat' >> /etc/modules

 

Restart Open vSwitch:

/etc/init.d/openvswitch-switch restart

 

Start it again if necessary, making sure the ovs-brcompatd, ovs-vswitchd, and ovsdb-server services are all running,

until this check shows:

lsmod | grep brcompat

   brcompat              13512  0

   openvswitch           84038  7 brcompat

If it still will not start, force-reload the kernel module:

/etc/init.d/openvswitch-switch force-reload-kmod

 

Restart the Open vSwitch service:

service openvswitch-switch restart

 

Add the br-int and br-ex bridges:

ovs-vsctl add-br br-int

ovs-vsctl add-br br-ex

ifconfig br-ex promisc up

ovs-vsctl add-port br-ex eth2
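
To confirm both bridges exist and eth2 is attached to br-ex (my own check):

ovs-vsctl show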

 

Edit the network interface configuration:

cat /etc/network/interfaces

# This file describes the network interfaces available on your system

# and how to activate them. For more information, see interfaces(5).

 

# The loopback network interface

auto lo

iface lo inet loopback

 

auto eth0

iface eth0 inet static

address 10.1.101.25

netmask 255.255.255.0

gateway 10.1.101.254

#dns-nameservers 192.168.1.3

dns-nameservers 10.1.101.51

 

auto eth1

iface eth1 inet static

address 192.168.1.25

netmask 255.255.255.0

 

auto br-ex

iface br-ex inet static

address 192.168.2.25

netmask 255.255.255.0

up ip link set $IFACE promisc on

down ip link set $IFACE promisc off

 

auto eth2

iface eth2 inet manual

up ifconfig $IFACE 0.0.0.0 up

up ip link set $IFACE promisc on

down ip link set $IFACE promisc off

down ifconfig $IFACE down

 

Restart networking:

/etc/init.d/networking restart

 

Install the Quantum agents:

apt-get install -y quantum-plugin-openvswitch-agent quantum-dhcp-agent quantum-l3-agent quantum-metadata-agent --force-yes

 

Edit quantum.conf:

vim /etc/quantum/quantum.conf

[DEFAULT]

verbose = False

state_path = /var/lib/quantum

lock_path = $state_path/lock

bind_host = 0.0.0.0

bind_port = 9696

core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2

api_paste_config = /etc/quantum/api-paste.ini

control_exchange = quantum

allow_overlapping_ips = True

rabbit_host = 192.168.1.24

rabbit_password = password

rabbit_port = 5672

notification_driver = quantum.openstack.common.notifier.rpc_notifier

[AGENT]

root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf

[keystone_authtoken]

auth_host = 192.168.1.24

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = quantum

admin_password = password

signing_dir = /var/lib/quantum/keystone-signing

 

Edit ovs_quantum_plugin.ini:

vim /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini

[DATABASE]

sql_connection = mysql://quantum:password@192.168.1.24/quantum

reconnect_interval = 2

[OVS]

tenant_network_type = gre

enable_tunneling = True

tunnel_id_ranges = 1:1000

integration_bridge = br-int

tunnel_bridge = br-tun

local_ip = 192.168.1.25

[AGENT]

polling_interval = 2

[SECURITYGROUP]

firewall_driver =quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

 

Edit l3_agent.ini:

vim /etc/quantum/l3_agent.ini

[DEFAULT]

debug = True

use_namespaces = True

external_network_bridge = br-ex

signing_dir = /var/cache/quantum

admin_tenant_name = service

admin_user = quantum

admin_password = password

auth_url = http://192.168.1.24:35357/v2.0

l3_agent_manager = quantum.agent.l3_agent.L3NATAgentWithStateReport

root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf

interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver

enable_multi_host = True

 

Edit dhcp_agent.ini:

vim /etc/quantum/dhcp_agent.ini

[DEFAULT]

debug = true

use_namespaces = True

signing_dir = /var/cache/quantum

admin_tenant_name = service

admin_user = quantum

admin_password = password

auth_url = http://192.168.1.24:35357/v2.0

dhcp_agent_manager = quantum.agent.dhcp_agent.DhcpAgentWithStateReport

root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf

state_path = /var/lib/quantum

interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver

dhcp_driver = quantum.agent.linux.dhcp.Dnsmasq

enable_multi_host = True

enable_isolated_metadata = False

 

Edit metadata_agent.ini:

vim /etc/quantum/metadata_agent.ini

[DEFAULT]

debug = True

auth_url = http://192.168.1.24:35357/v2.0

auth_region = RegionOne

admin_tenant_name = service

admin_user = quantum

admin_password = password

nova_metadata_ip = 192.168.1.24

nova_metadata_port = 8775

 

Restart the Quantum agents:

service quantum-plugin-openvswitch-agent restart

service quantum-dhcp-agent restart

service quantum-l3-agent restart

service quantum-metadata-agent restart
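
To confirm the agents registered (my addition; run on the controller, where the quantum client is configured), each agent should show an alive :-) status:

quantum agent-list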

 

 

Install the nova-compute component:

apt-get install -y nova-compute --force-yes

 

Edit api-paste.ini:

vim /etc/nova/api-paste.ini

[filter:authtoken]

paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory

auth_host = 192.168.1.24

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = nova

admin_password = password

signing_dir = /var/lib/nova/keystone-signing

auth_version = v2.0

 

Edit nova.conf:

vim /etc/nova/nova.conf

[DEFAULT]

dhcpbridge_flagfile=/etc/nova/nova.conf

dhcpbridge=/usr/bin/nova-dhcpbridge

logdir=/var/log/nova

state_path=/var/lib/nova

lock_path=/var/lock/nova

force_dhcp_release=True

iscsi_helper=tgtadm

libvirt_use_virtio_for_bridges=True

connection_type=libvirt

root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf

verbose=True

ec2_private_dns_show_ip=True

api_paste_config=/etc/nova/api-paste.ini

volumes_path=/var/lib/nova/volumes

enabled_apis=ec2,osapi_compute,metadata

 

#cinder

volume_api_class=nova.volume.cinder.API

osapi_volume_listen_port=5900

 

#scheduler

scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter

 

# General

rabbit_host=192.168.1.24

rabbit_password=password

auth_strategy=keystone

ec2_host=192.168.1.24

ec2_url=http://192.168.1.24:8773/services/Cloud

 

# Networking

# libvirt_use_virtio_for_bridges=True

network_api_class=nova.network.quantumv2.api.API

quantum_url=http://192.168.1.24:9696

quantum_auth_strategy=keystone

quantum_admin_tenant_name=service

quantum_admin_username=quantum

quantum_admin_password=password

quantum_admin_auth_url=http://192.168.1.24:35357/v2.0

libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver

linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver

host=192.168.1.25

service_quantum_metadata_proxy = True

 

# Security Groups

#firewall_driver=nova.virt.firewall.NoopFirewallDriver

#security_group_api=quantum

 

# Compute #

compute_driver=libvirt.LibvirtDriver

connection_type=libvirt

 

# Glance

glance_api_servers=192.168.1.24:9292

image_service=nova.image.glance.GlanceImageService

 

# novnc

vnc_enabled=true

vncserver_proxyclient_address=10.1.101.25

novncproxy_base_url=http://10.1.101.24:6080/vnc_auto.html

vncserver_listen=0.0.0.0

 

Restart the nova-compute service:

service nova-compute restart

Deploy the compute2 node following the same procedure.
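
Once both compute nodes are deployed, nova-manage service list on the controller should also show nova-compute entries for compute1 and compute2 with a :-) state (my expectation, in the same format as the earlier output):

nova-manage service list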

 

On the controller node, set up ICMP and TCP rules for the OpenStack security group:

# Obtain default security group ID

quantum security-group-list

# Enable ICMP and TCP ports

quantum security-group-rule-create --protocol icmp --direction ingress {security group ID}

quantum security-group-rule-create --protocol icmp --direction egress {security group ID}

quantum security-group-rule-create --protocol tcp --direction egress --port-range-min 1 --port-range-max 65535 {security group ID}

quantum security-group-rule-create --protocol tcp --direction ingress --port-range-min 1 --port-range-max 65535 {security group ID}
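
As a worked example (a sketch of my own, assuming a single security group named default), the group ID can be captured and substituted like this:

SG_ID=$(quantum security-group-list | awk '/ default /{print $2; exit}')

quantum security-group-rule-create --protocol icmp --direction ingress $SG_ID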

 ====================================================================================



(1) Check the Nova and Quantum services
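
(The original post showed screenshots here; the equivalent checks would be:)

nova-manage service list

quantum agent-list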

 



 

(2) Prepare the Quantum network (script pre_network.sh, see attachment)

#!/bin/bash

# Create Tenant and User #

tenant=TenantA

user=UserA

usermail=usera@mutil_host.com

role=Member

if keystone tenant-list | grep -q $tenant; then

   echo "Tenant $tenant existed!"

else

   tenant_id=`keystone tenant-create --name $tenant | awk '/id/{print $4}'`

fi

if keystone user-list | grep -q $user; then

   echo "User $user existed!"

else

   keystone user-create --name=$user --pass=password --tenant-id $tenant_id --email=$usermail

fi

keystone user-role-add --tenant $tenant --user $user --role $role

# Create virtual router and sub-network #

quantum net-create Ext-Net --provider:network_type local --router:external true

quantum subnet-create --allocation-pool start=192.168.2.100,end=192.168.2.200 --gateway 192.168.2.1 Ext-Net 192.168.2.0/24 --enable_dhcp=False

quantum --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://localhost:5000/v2.0 net-create $tenant-Net

subnet_id=`quantum --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://localhost:5000/v2.0 subnet-create $tenant-Net 10.0.0.0/24 | awk '$2~/^id/{print $4}'`

quantum --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://localhost:5000/v2.0 router-create $tenant-R1

quantum --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://localhost:5000/v2.0 router-interface-add $tenant-R1 ${subnet_id}

quantum router-gateway-set $tenant-R1 Ext-Net

Run the script: bash pre_network.sh

 

(3) For thorough testing, create several tenants with the tenant.sh script (see attachment).

(Modify the tenant, user, and usermail variables in the script before each run.)

root@controller:~# cat tenant.sh

#!/bin/bash

# Create Tenant and User #

tenant=TenantB

user=UserB

usermail=userb@mutil_host.com

role=Member

if keystone tenant-list | grep -q $tenant; then

   echo "Tenant $tenant existed!"

else

   tenant_id=`keystone tenant-create --name $tenant | awk '/id/{print $4}'`

fi

if keystone user-list | grep -q $user; then

   echo "User $user existed!"

else

   keystone user-create --name=$user --pass=password --tenant-id $tenant_id --email=$usermail

fi

keystone user-role-add --tenant $tenant --user $user --role $role

# Create virtual router and sub-network #

quantum --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://localhost:5000/v2.0 net-create $tenant-Net

subnet_id=`quantum --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://localhost:5000/v2.0 subnet-create $tenant-Net 10.0.9.0/24 | awk '$2~/^id/{print $4}'`

quantum --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://localhost:5000/v2.0 router-create $tenant-R1

quantum --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://localhost:5000/v2.0 router-interface-add $tenant-R1 ${subnet_id}

quantum router-gateway-set $tenant-R1 Ext-Net

Run the script: bash tenant.sh (modify the tenant, user, and usermail variables before creating each new tenant)

 

(4) Prepare resources: create a new flavor

nova flavor-create m2.tiny 6 64 0 1
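
The arguments are name, ID, RAM in MB, disk in GB, and vCPUs, so m2.tiny gets ID 6 with 64 MB of RAM, no disk, and 1 vCPU. Verify with:

nova flavor-list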

 

List the tenants and users
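
(Screenshots omitted; the equivalent commands would be:)

keystone tenant-list

keystone user-list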

 



(5) Check the namespaces, which are distributed across the compute nodes
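
(Screenshots omitted. On each compute node, something like the following should show the qdhcp-<network-id> and qrouter-<router-id> namespaces; the IDs here are placeholders:)

ip netns list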





(6) Create a VM for a tenant

nova --os-tenant-name TenantH --os-username UserH --os-password password --os-auth-url="http://192.168.1.24:5000/v2.0/" list

nova --os-tenant-name TenantH --os-username UserH --os-password password --os-auth-url="http://192.168.1.24:5000/v2.0/" boot --flavor 6 --image cirros vm005



(7) Ping the tenant's VM through its namespace
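
For example (my sketch; <network-id> is a placeholder for the ID shown by quantum net-list), on the compute node hosting the DHCP namespace:

ip netns exec qdhcp-<network-id> ping -c 3 10.0.8.6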

 


(8) SSH into the VM and ping the other VMs

The cirros VM login is user cirros, password cubswin:)
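
For example (my sketch; <router-id> is a placeholder from ip netns list):

ip netns exec qrouter-<router-id> ssh cirros@10.0.8.6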

 


(9) Prepare a floating IP for TenantH's VM vm005

 


The newly created floating IP is 192.168.2.17.

Main commands:

quantum floatingip-list

quantum --os-tenant-name TenantH --os-username UserH --os-password password --os-auth-url="http://192.168.1.24:5000/v2.0/" net-list

quantum --os-tenant-name TenantH --os-username UserH --os-password password --os-auth-url="http://192.168.1.24:5000/v2.0/" floatingip-create Ext-Net


(10) Bind the floating IP [192.168.2.17] to TenantH's VM vm005 [10.0.8.6]

 


Main commands:

Check vm005's private IP address [vm005 – 10.0.8.6]:

nova --os-tenant-name TenantH --os-username UserH --os-password password --os-auth-url="http://192.168.1.24:5000/v2.0/" list

Find the ID of the floating IP [192.168.2.17]:

quantum floatingip-list

Find the port ID of the fixed IP [10.0.8.6]:

quantum --os-tenant-name TenantH --os-username UserH --os-password password --os-auth-url="http://192.168.1.24:5000/v2.0/" port-list

Associate the floating IP [192.168.2.17] with vm005 [10.0.8.6]:

quantum --os-tenant-name TenantH --os-username UserH --os-password password --os-auth-url="http://192.168.1.24:5000/v2.0/" floatingip-associate <floatingip-id> <fixedip-port-id>

 

(11) Verify the floating IP works, and SSH into the VM via the floating IP

The cirros VM login is user cirros, password cubswin:)
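
For example (my addition), from a host on the 192.168.2.0/24 external network:

ping -c 3 192.168.2.17

ssh cirros@192.168.2.17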

 



Further testing of the multi-host feature:

 


Both compute nodes, compute1 and compute2, run a DHCP agent, an L3 agent, and an Open vSwitch agent. You can manually stop one L3/DHCP pair (for example, stop compute1's L3 agent and compute2's DHCP agent, or stop compute2's L3 agent and compute1's DHCP agent, or stop any single L3 or DHCP agent on either compute node). As long as at least one L3 agent and one DHCP agent remain up in the cluster, the whole system stays usable and new tenants and VMs can still be created. This is the benefit of multi-host.

 


If VMs fail to start through nova after a compute node reboot, with the error:
libvirtError: Unable to add bridge qbrxxxxxxx port tapxxxxxxx: Invalid argument
the workaround used here patches plug_ovs_hybrid so the stale OVS port is deleted before the tap device is re-added to the Linux bridge:
vim /usr/share/pyshared/nova/virt/libvirt/vif.py
335
336     def plug_ovs_hybrid(self, instance, vif):
337         """Plug using hybrid strategy
338
339         Create a per-VIF linux bridge, then link that bridge to the OVS
340         integration bridge via a veth device, setting up the other end
341         of the veth device just like a normal OVS port.  Then boot the
342         VIF on the linux bridge using standard libvirt mechanisms.
343         """
344         super(LibvirtGenericVIFDriver,
345               self).plug(instance, vif)
346
347         network, mapping = vif
348         iface_id = self.get_ovs_interfaceid(mapping)
349         br_name = self.get_br_name(mapping['vif_uuid'])
350         v1_name, v2_name = self.get_veth_pair_names(mapping['vif_uuid'])
351
352         if not linux_net.device_exists(br_name):
353             utils.execute('brctl', 'addbr', br_name, run_as_root=True)
354             utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
355             utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
356
357         if not linux_net.device_exists(v2_name):
358             linux_net._create_veth_pair(v1_name, v2_name)
359             utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
360             LOG.info('info_zhangguoqing')
361             try:
362                 v1_tap = "tap" + br_name[3:]
363                 LOG.info("zhangguoqing br_name, v1_tap: %s %s" % (br_name, v1_tap))
364                 try:
365                     utils.execute('ovs-vsctl', 'del-port', br_name, v1_tap, run_as_root=True)
366                     LOG.info("zhangguoqing exec ok for: ovs-vsctl del-port %s %s" % (br_name, v1_tap))
367                 except Exception, e:
368                     LOG.info("zhangguoqing exec wrong for: ovs-vsctl del-port %s %s" % (br_name, v1_tap))
369                     pass
370                 utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
371                 LOG.info("zhangguoqing exec ok for: brctl addif %s %s" % (br_name, v1_name))
372             except Exception, e:
373                 LOG.info("zhangguoqing exec wrong for: brctl addif %s %s" % (br_name, v1_name))
374                 pass
375             linux_net.create_ovs_vif_port(self.get_bridge_name(network),
376                                           v2_name, iface_id, mapping['mac'],
377                                           instance['uuid'])
378
 
Source: http://blog.sina.com.cn/s/blog_6de3aa8a0101lnar.html

