Friday, 20 September 2019

Openstack HA - Rocky - 4 - Ceph

/etc/hosts
10.1.17.21      osd1
10.1.17.22      osd2
10.1.17.23      osd3
10.1.17.24      mon1
10.1.17.25      admin

sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
sudo rpm --import 'https://download.ceph.com/keys/release.asc'
rpm -Uvh https://download.ceph.com/rpm-nautilus/el7/noarch/ceph-release-1-1.el7.noarch.rpm

 =================================================== Setup Ceph ==============================================================

# admin node
yum install ceph-deploy
# all node
yum install snappy leveldb gdisk python-argparse gperftools-libs
yum install ceph

# mon1

# mon1 - ceph
vi /etc/sudoers.d/cephuser
cephuser ALL = (root) NOPASSWD:ALL
cephuser ALL = (ceph) NOPASSWD:ALL

uuidgen
362118c6-6d4d-452b-84c1-467d6fc7965e
/etc/ceph/ceph.conf
[global]
fsid = 362118c6-6d4d-452b-84c1-467d6fc7965e
mon initial members = mon1
mon host = 10.1.17.24
public network = 10.1.0.0/16
cluster network = 10.1.0.0/16
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3     # Write an object n times.
osd pool default min size = 2 # Allow writing n copies in a degraded state.
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1

# mon1 - mon
sudo ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *';
sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *';
sudo ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r';
sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring;
sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring

monmaptool --create --add mon1 10.1.17.24 --fsid 362118c6-6d4d-452b-84c1-467d6fc7965e /tmp/monmap;
sudo mkdir /var/lib/ceph/mon/ceph-mon1
sudo -u ceph ceph-mon --mkfs -i mon1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring

sudo systemctl start ceph-mon@mon1
sudo systemctl status ceph-mon@mon1

sudo ceph mon enable-msgr2
sudo systemctl restart ceph-mon@mon1


sudo ceph status

# mon1 - mgr
sudo ceph auth get-or-create mgr.mon1 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-mon1/
sudo ceph auth get mgr.mon1 -o /var/lib/ceph/mgr/ceph-mon1/keyring

systemctl start ceph-mgr@mon1
systemctl status ceph-mgr@mon1
systemctl enable ceph-mgr@mon1

# mon1 - mgr - dashboard
https://docs.ceph.com/docs/nautilus/mgr/dashboard/#enabling
yum install ceph-mgr-dashboard
sudo ceph mgr module enable dashboard
sudo ceph dashboard create-self-signed-cert
sudo openssl req -new -nodes -x509 -subj "/O=IT/CN=ceph-mgr-dashboard" -days 3650 -keyout dashboard.key -out dashboard.crt -extensions v3_ca
sudo ceph config-key set mgr/dashboard/crt -i dashboard.crt;
sudo ceph config-key set mgr/dashboard/key -i dashboard.key
(
sudo ceph config-key set mgr/dashboard/ceph-mon1/crt -i dashboard.crt
sudo ceph config-key set mgr/dashboard/ceph-mon1/key -i dashboard.key
)
sudo ceph mgr module enable dashboard
[cephuser@mon1 ceph]$ sudo ceph mgr services
{
    "dashboard": "https://mon1:8443/"
}
sudo ceph dashboard ac-user-create admin admin123 administrator
sudo ceph mgr module ls

# osd1 - osd
sudo ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring;
sudo ceph-volume lvm create --data /dev/sdb;
sudo ceph-volume lvm list
sudo ceph osd tree
sudo systemctl start ceph-osd@0
sudo systemctl status ceph-osd@0
sudo systemctl enable ceph-osd@0

sudo ceph-volume lvm create --data /dev/sdc;
sudo ceph-volume lvm list;
sudo ceph osd tree;
sudo systemctl start ceph-osd@3;
sudo systemctl status ceph-osd@3;
sudo systemctl enable ceph-osd@3

# osd2 - osd
sudo ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring;
sudo ceph-volume lvm create --data /dev/sdb;
sudo ceph-volume lvm list
sudo ceph osd tree
sudo systemctl start ceph-osd@1;
sudo systemctl status ceph-osd@1;
sudo systemctl enable ceph-osd@1

sudo ceph-volume lvm create --data /dev/sdc;
sudo ceph-volume lvm list;
sudo ceph osd tree;
sudo systemctl start ceph-osd@4;
sudo systemctl status ceph-osd@4;
sudo systemctl enable ceph-osd@4
# osd3 - osd
sudo ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring;
sudo ceph-volume lvm create --data /dev/sdb;
sudo ceph-volume lvm list
sudo ceph osd tree
sudo systemctl start ceph-osd@2;
sudo systemctl status ceph-osd@2;
sudo systemctl enable ceph-osd@2

sudo ceph-volume lvm create --data /dev/sdc;
sudo ceph-volume lvm list;
sudo ceph osd tree;
sudo systemctl start ceph-osd@5;
sudo systemctl status ceph-osd@5;
sudo systemctl enable ceph-osd@5

=================================================== Integrate Ceph ==============================================================
https://gist.github.com/vanduc95/97c4110338e0319a11d4b8ab36c2134a
https://github.com/hocchudong/Ghichep-Storage/blob/master/ChienND/Ceph/Configure%20Block%20Ceph%20with%20OpenStack.md

sudo ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images'
sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
sudo ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'

[cephuser@mon1 ceph]$ sudo ceph auth list
installed auth entries:

osd.0
        key: AQCSGEFdQ4hlOxAA3LKSDPGi8uuwixFFcjKcIA==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.1
        key: AQAyHUFduEncAhAAy2mPsbLIi2zlQRtd1DeRvQ==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.2
        key: AQCdHUFdNvaDEhAAe65RT2A6hGtbn1LFDUBnKg==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.3
        key: AQBAQ0FdNXSfCBAAGPEzpghsROGvHNnuEO4GyA==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.4
        key: AQCFQ0FdGQ6hFBAAm0oPpLDafTSBLAHHROGAGg==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.5
        key: AQCyQ0FdLS7EOBAAvrtmrV0qwPg+YxQeuwlUug==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
client.admin
        key: AQD1GUBdoCGoMxAAupOhVEj7pOhWDIQxNcJ+2g==
        caps: [mds] allow *
        caps: [mgr] allow *
        caps: [mon] allow *
        caps: [osd] allow *
client.bootstrap-mds
        key: AQAE/0BdcQMgFxAARY1TaeQ96r89wqy4cZQJpA==
        caps: [mon] allow profile bootstrap-mds
client.bootstrap-mgr
        key: AQAE/0BdDxcgFxAADJf48NLxnPCNBm8qNYndgQ==
        caps: [mon] allow profile bootstrap-mgr
client.bootstrap-osd
        key: AQD1GUBdZd3VORAAD4A88M7DT7W4DAkigIqt9w==
        caps: [mgr] allow r
        caps: [mon] profile bootstrap-osd
client.bootstrap-rbd
        key: AQAE/0Bdbj8gFxAAgfNWRpRXg5wYm7ziSxEKWw==
        caps: [mon] allow profile bootstrap-rbd
client.bootstrap-rbd-mirror
        key: AQAE/0Bdw1IgFxAAwRAtoUr/vVutL2WjH6hMLw==
        caps: [mon] allow profile bootstrap-rbd-mirror
client.bootstrap-rgw
        key: AQAE/0BdVGkgFxAAhJguR6hzPJGw7b/TtjezAg==
        caps: [mon] allow profile bootstrap-rgw
client.cinder
        key: AQDDT0FdBH4qOBAAyrgmDM9GQEXFaxoBQVy1ug==
        caps: [mon] allow r
        caps: [osd] allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images
client.cinder-backup
        key: AQAgUkFdkjKKLhAAJUvbI4PTmBYYzGcq1sajvw==
        caps: [mon] allow r
        caps: [osd] allow class-read object_prefix rbd_children, allow rwx pool=backups
client.glance
        key: AQAOUkFdMc0tGxAAWZ/1wIOS9VesnyrM0w/rqw==
        caps: [mon] allow r
        caps: [osd] allow class-read object_prefix rbd_children, allow rwx pool=images
mgr.admin
        key: AQCBHkFdcZ7lIRAAUcdwctHdgNZVjpv1aNo3PA==
        caps: [mds] allow *
        caps: [mon] allow profile mgr
        caps: [osd] allow *
mgr.mon1
        key: AQBUH0FdsIYwLhAA4QAhzwvz4feG1mzx+lwdGw==
        caps: [mds] allow *
        caps: [mon] allow profile mgr
        caps: [osd] allow *
       
ceph osd pool create volumes 128
ceph osd pool create images 128;
ceph osd pool create backups 128;
ceph osd pool create vms 128
       
# computes & controls
yum-config-manager --disable centos-ceph-luminous
sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
sudo rpm --import 'https://download.ceph.com/keys/release.asc'
rpm -Uvh https://download.ceph.com/rpm-nautilus/el7/noarch/ceph-release-1-1.el7.noarch.rpm

# Ceph client packages for the OpenStack services that talk to RBD.
# (Originally annotated with bare parentheses, which is a shell syntax
# error — a `(...)` after a command starts a subshell.)
yum install -y python-rbd     # for glance-api
yum install -y ceph-common    # for nova-compute, cinder-backup, cinder-volume

scp /etc/ceph/ceph* control1:/etc/ceph
scp /etc/ceph/ceph* control2:/etc/ceph
scp /etc/ceph/ceph* control3:/etc/ceph
scp /etc/ceph/ceph* compute2:/etc/ceph
scp /etc/ceph/ceph* compute3:/etc/ceph


ceph auth get-or-create client.glance | tee /etc/ceph/ceph.client.glance.keyring;
chown glance:glance /etc/ceph/ceph.client.glance.keyring;
ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring;
chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring;
ceph auth get-or-create client.cinder-backup | tee /etc/ceph/ceph.client.cinder-backup.keyring;
chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring

# control - cinder - rbd: volumes

[root@control1 ~(keystone)]# uuidgen
8e7fb7d7-e7ae-4401-a1f9-deeaf75ab4a4

vi /etc/cinder/cinder.conf
[DEFAULT]
...
enabled_backends=ceph
glance_api_version = 2
[ceph]
volume_backend_name=ceph
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_pool=volumes
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot=false
rbd_max_clone_depth=5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user=cinder
rbd_secret_uuid=8e7fb7d7-e7ae-4401-a1f9-deeaf75ab4a4

systemctl restart openstack-nova-api.service;
systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service   

# compute - nova - rbd

# mon1
ceph auth get-or-create client.cinder | ssh root@compute2 sudo tee /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder | ssh root@compute3 sudo tee /etc/ceph/ceph.client.cinder.keyring
[client.cinder]
        key = AQDDT0FdBH4qOBAAyrgmDM9GQEXFaxoBQVy1ug==
ceph auth get-key client.cinder | ssh root@compute2 tee /root/client.cinder.key
ceph auth get-key client.cinder | ssh root@compute3 tee /root/client.cinder.key
AQDDT0FdBH4qOBAAyrgmDM9GQEXFaxoBQVy1ug==

# computes - rbd: vms
vi /etc/nova/nova.conf

[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 8e7fb7d7-e7ae-4401-a1f9-deeaf75ab4a4
disk_cachemodes="network=writeback"
hw_disk_discard = unmap
live_migration_flag= "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"


cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>8e7fb7d7-e7ae-4401-a1f9-deeaf75ab4a4</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF

virsh secret-define --file secret.xml
Secret 8e7fb7d7-e7ae-4401-a1f9-deeaf75ab4a4 created

sudo virsh secret-set-value --secret 8e7fb7d7-e7ae-4401-a1f9-deeaf75ab4a4 --base64 $(cat /root/client.cinder.key)
systemctl restart libvirtd.service openstack-nova-compute.service
openstack volume type create --property volume_backend_name='ceph'  volume_from_ceph

# check volumes
[root@mon1 ceph]# rbd -p vms ls
RBD images are thin-provisioned
04117f32-9c15-44f2-bd02-227e18f2e4e5_disk
[root@mon1 ceph]# rbd -p volumes ls
volume-288f892c-fcb0-46e4-91ef-3c8be9dd04a1
volume-697ff017-c39b-401e-ae46-9c65ba023b6d
volume-81816a8e-4892-4cfd-91d9-aa6663a17527

rbd diff volumes/volume-288f892c-fcb0-46e4-91ef-3c8be9dd04a1 | awk '{ SUM += $2 } END { print SUM/1024/1024 " MB" }'
1003.36 MB

rbd diff volumes/volume-288f892c-fcb0-46e4-91ef-3c8be9dd04a1 | awk '{ SUM += $2 } END { print SUM/1024/1024 " MB" }'
rbd diff volumes/04117f32-9c15-44f2-bd02-227e18f2e4e5_disk | awk '{ SUM += $2 } END { print SUM/1024/1024 " MB" }'



Openstack HA - Rocky - 3 - VM


=================================================== MariaDB  ==============================================================

MariaDB [cinder]>
delete from `volumes` where display_name = "ceph_inst2_vol1";
delete from `instances` where uuid = "9f6eafed-09d0-43e1-8d78-342ae1585b4c";
delete from block_device_mapping where instance_uuid='9f6eafed-09d0-43e1-8d78-342ae1585b4c'
=================================================== VM  ==============================================================

openstack image create "cirros" --file /root/cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image create "cirros_2" --file /root/cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image create "win2k16" --file /shared/winsrv-2016.qcow2 --disk-format qcow2 --container-format bare --public;
openstack image create "win2k19" --file /shared/winsrv-2019.qcow2 --disk-format qcow2 --container-format bare --public

# Create the standard flavors. The canonical option name is --vcpus;
# the original --vcpu only worked via cliff's prefix-abbreviation matching,
# which breaks if a conflicting option is ever added.
openstack flavor create --id 1 --ram 1024 --disk 1 --vcpus 1 tiny
openstack flavor create --id 2 --ram 4096 --disk 10 --vcpus 2 small
openstack flavor create --id 3 --ram 4096 --disk 30 --vcpus 2 medium
openstack flavor create --id 4 --ram 4096 --disk 50 --vcpus 2 medium2

ssh-keygen -q -N ""
openstack keypair create --public-key ~/.ssh/id_rsa.pub key1

# network

# create network vlan111
openstack network create --share --provider-physical-network physnet1 --provider-network-type vlan --provider-segment=111 pro_vlan111
openstack subnet create --subnet-range 10.1.0.0/16 --gateway 10.1.0.1 --network  pro_vlan111 --allocation-pool start=10.1.17.80,end=10.1.17.90 pro_vlan111_subnet1

# create network vlan126
openstack network create --share --provider-physical-network physnet1 --provider-network-type vlan --provider-segment=126 pro_vlan126
openstack subnet create --subnet-range 192.168.126.0/24 --gateway 192.168.126.1 --network pro_vlan126 --allocation-pool start=192.168.126.80,end=192.168.126.90 pro_vlan126_subnet1

# review network on bridge
[root@compute3 ns-metadata-proxy]#  ovs-ofctl dump-flows br-em2 | grep mod_vlan_vid
 cookie=0x933b73fd15900d31, duration=278563.998s, table=2, n_packets=33178, n_bytes=2821333, idle_age=94, hard_age=65534, priority=4,in_port=2,dl_vlan=2 actions=mod_vlan_vid:111,NORMAL
 cookie=0x933b73fd15900d31, duration=94978.599s, table=2, n_packets=1066, n_bytes=97007, idle_age=1690, hard_age=65534, priority=4,in_port=2,dl_vlan=7 actions=mod_vlan_vid:126,NORMAL

 [root@compute3 ns-metadata-proxy]# ovs-ofctl dump-flows br-int | grep mod_vlan_vid
 cookie=0x5983d9e34dc90c71, duration=278757.400s, table=0, n_packets=17731335, n_bytes=1431007426, idle_age=0, hard_age=65534, priority=3,in_port=1,dl_vlan=111 actions=mod_vlan_vid:2,resubmit(,60)
 cookie=0x5983d9e34dc90c71, duration=95172s, table=0, n_packets=1134, n_bytes=217793, idle_age=2232, hard_age=65534, priority=3,in_port=1,dl_vlan=126 actions=mod_vlan_vid:7,resubmit(,60)
 ===== router

# create router1 add to pro_vlan111
openstack router create router1
openstack network set --external pro_vlan111
openstack router set router1 --external-gateway pro_vlan111

openstack router create router2;
openstack network set --external pro_vlan111;
openstack router set router2 --external-gateway pro_vlan111

openstack router create router3
openstack network set --external pro_vlan111;
openstack router set router3 --external-gateway pro_vlan111



=========== no dvr =============
# create subnet int_net1 add to router1
openstack network create --provider-network-type vxlan int_net1
openstack subnet create int_net1_sub1 --network int_net1 --subnet-range 192.168.1.0/24 --gateway 192.168.1.1
openstack router add subnet router1 int_net1_sub1

# create subnet int_net2 add to router1
openstack network create --provider-network-type vxlan int_net2
openstack subnet create int_net2_sub1 --network int_net2 --subnet-range 192.168.2.0/24 --gateway 192.168.2.1
openstack router add subnet router1 int_net2_sub1


=========== dvr =============
# create subnet int_net4 add to router2
openstack network create --provider-network-type vxlan int_net4;
openstack subnet create int_net4_sub --network int_net4 --subnet-range 192.168.104.0/24 --gateway 192.168.104.1 --dns-nameserver 192.168.104.1;
openstack router add subnet router2 int_net4_sub

# create subnet int_net5 add to router2
openstack network create --provider-network-type vxlan int_net5;
openstack subnet create int_net5_sub --network int_net5 --subnet-range 192.168.105.0/24 --gateway 192.168.105.1 --dns-nameserver 192.168.105.1;
openstack router add subnet router2 int_net5_sub

==========

# Create VM

# vlan 111 net0
openstack server create --flavor 1 --image cirros --nic net-id=aa3f480d-f4c9-4a41-aedd-30ae275ef371 --key-name key1 net0_inst1
openstack server add security group net0_inst1 sg_linux
openstack server create --flavor 3 --image win2k16 --nic net-id=aa3f480d-f4c9-4a41-aedd-30ae275ef371 --key-name key1 inst7
openstack server create --flavor 3 --image inst8_vm1 --nic net-id=aa3f480d-f4c9-4a41-aedd-30ae275ef371 --key-name key1 inst2
openstack server create --flavor 3 --image centos --nic net-id=aa3f480d-f4c9-4a41-aedd-30ae275ef371 --key-name key1 net0_centos_1
openstack server add security group net0_centos_1 sg_linux;

# vlan 126 net0
openstack server create --flavor 3 --image centos --nic net-id=4af496b7-dd33-418d-b9ec-e2b7341dd0d9 --key-name key1 vlan126-centos-1
openstack server add security group vlan126-centos-1 sg_linux;

# vxlan int_net1 (192.168.1.0/24)
openstack server create --flavor 1 --image cirros --nic net-id=833d4b29-e87b-4f95-b424-800e98b45115 --key-name key1 net1_inst1
openstack server add security group net1_inst1 sg_linux

openstack server create --flavor 3 --image web_net1_inst --nic net-id=833d4b29-e87b-4f95-b424-800e98b45115 --key-name key1 net1-centos-1
openstack server create --flavor 3 --image web_net1_inst --nic net-id=833d4b29-e87b-4f95-b424-800e98b45115 --key-name key1 net1-centos-2
openstack server add security group net1-centos-1 sg_linux
openstack server add security group net1-centos-2 sg_linux
# port in int_net2_sub1
openstack port create int_net2_sub1_p1 --network b79e931e-889e-4ace-a3e4-a5dbcb2958e1
openstack port create int_net2_sub1_p2 --network b79e931e-889e-4ace-a3e4-a5dbcb2958e1

# vxlan int_net2
openstack server create --flavor 1 --image cirros --nic net-id=b99ed8b1-7c68-4516-a3ed-def5465f63e0 --key-name key1 inst4

# vxlan int_net3
openstack server create --flavor 1 --image cirros --nic net-id=06ffa670-d97a-4be3-a324-fc4520d6075e --key-name key1 inst5

ssh -i /root/.ssh/id_rsa centos@10.1.17.106
# vxlan int_net4
# net4_inst1 192.168.104.6 compute2
# net4_inst2 192.168.104.17 compute3

openstack server create --flavor 1 --image cirros --nic net-id=50a78bea-b44b-493d-ab4d-2913a5c30067 --key-name key1 --availability-zone nova:compute2:compute2 net4_inst1;
openstack server create --flavor 1 --image cirros --nic net-id=50a78bea-b44b-493d-ab4d-2913a5c30067 --key-name key1 --availability-zone nova:compute3:compute3 net4_inst2
openstack server add security group net4_inst1 sg_linux;
openstack server add security group net4_inst2 sg_linux

ip netns exec qrouter-5a29c08d-c051-40cf-87e3-89c3a6d48163 tcpdump -nei qr-15ba535b-e1

# vxlan int_net5
# net5_inst1 192.168.105.16 compute2
# net5_inst2 192.168.105.11 compute3
openstack server create --flavor 1 --image cirros --nic net-id=055fac7d-e09f-49b4-8f46-a2cb94508f49 --key-name key1 --availability-zone nova:compute2:compute2 net5_inst1;
openstack server create --flavor 1 --image cirros --nic net-id=055fac7d-e09f-49b4-8f46-a2cb94508f49 --key-name key1 --availability-zone nova:compute3:compute3 net5_inst2


ssh -i /root/.ssh/id_rsa centos@10.1.17.106

openstack server delete inst4;
openstack server delete inst5;
openstack server delete inst6

# ceph
openstack volume create --type volume_from_ceph --size 10 ceph_inst1
openstack server create --flavor 3 --image win2k16 --block-device source=697ff017-c39b-401e-ae46-9c65ba023b6d --nic net-id=aa3f480d-f4c9-4a41-aedd-30ae275ef371 --key-name key1 ceph_inst1
openstack server add security group ceph_inst1 group2

openstack volume create --type volume_from_ceph --size 5 ceph-share-2
# Linux security group 1 ( Linux )
openstack security group rule create group1 --protocol tcp --dst-port 22:22 --remote-ip 0.0.0.0/0
openstack security group rule create group1 --protocol icmp --remote-ip 0.0.0.0/0

# Windows security group 2 ( Windows )
openstack security group rule create group2 --protocol udp --dst-port 3389:3389 --remote-ip 0.0.0.0/0
openstack security group rule create group2 --protocol tcp --dst-port 3389:3389 --remote-ip 0.0.0.0/0
openstack security group rule create group2 --protocol icmp --remote-ip 0.0.0.0/0

# Linux security group  ( Linux )
openstack security group create group_web
openstack security group rule create group_web --protocol tcp --dst-port 80:80 --remote-ip 0.0.0.0/0
openstack security group rule create group_web --protocol tcp --dst-port 443:443 --remote-ip 0.0.0.0/0
# Web Server
openstack server add security group inst_net1_inst1 group1
openstack server add security group net1_inst2 group_web

openstack server add security group inst5 group1
openstack server add security group inst8 group2



[root@control1 ~(keystone)]# openstack server list
+--------------------------------------+--------+--------+-------------------------+--------+--------+
| ID                                   | Name   | Status | Networks                | Image  | Flavor |
+--------------------------------------+--------+--------+-------------------------+--------+--------+
| 64f90883-0ac0-4ebd-b099-9a2969d84939 | inst5  | ACTIVE | int_net3=192.168.103.4  | cirros | tiny   |

[root@compute2 ~]#  ip netns exec qdhcp-06ffa670-d97a-4be3-a324-fc4520d6075e ping 192.168.103.4
PING 192.168.103.4 (192.168.103.4) 56(84) bytes of data.
64 bytes from 192.168.103.4: icmp_seq=1 ttl=64 time=0.578 ms

ip netns exec qdhcp-06ffa670-d97a-4be3-a324-fc4520d6075e ssh cirros@192.168.103.4

================= KVM to OSP
# vm1 1 * hdd

scp vm1.img control1:/shared
# NOTE(review): the image name must match the --image used by the
# "server create" below; the original created "kvm_rh6" but booted
# "kvm_rh76", so the boot would fail with "image not found".
openstack image create "kvm_rh76" --file /shared/vm1.img --disk-format qcow2 --container-format bare --public
openstack server create --flavor 3 --image kvm_rh76 --nic net-id=aa3f480d-f4c9-4a41-aedd-30ae275ef371,v4-fixed-ip=10.1.17.201 --key-name key1 net0-kvm-rh67-2
openstack server add security group net0-kvm-rh67-2 sg_linux;
# vm2 2 * hdd
openstack image create "vm2" --file /shared/vm2.img --disk-format qcow2 --container-format bare --public
openstack image create "vm2-hdd2" --file /shared/vm2-hdd2.qcow2 --disk-format qcow2 --container-format bare --public
openstack server create --flavor 3 --image vm2 --nic net-id=aa3f480d-f4c9-4a41-aedd-30ae275ef371,v4-fixed-ip=10.1.17.201 --key-name key1 net0-vm2
openstack server add volume net0-vm2 vm2-hdd2
openstack server add security group net0-vm2 sg_linux;


Openstack HA - Rocky - 2 - OSP Services

rsync -avP `cat /shared/rsync_include.txt` /shared/osp_config/`hostname`/

rsync_include.txt
/etc/neutron
/etc/nova
/etc/glance
/etc/keystone
/etc/cinder

tar -cf /shared/osp_config.tar /shared/osp_config

 =================================================== Glance  ==============================================================

openstack user create --domain default --project service --password servicepassword glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image service" image

openstack endpoint create --region RegionOne image public http://vip:9292;
openstack endpoint create --region RegionOne image admin http://vip:9292;
openstack endpoint create --region RegionOne image internal http://vip:9292

create database glance;
grant all privileges on glance.* to glance@'localhost' identified by 'password';
grant all privileges on glance.* to glance@'%' identified by 'password';
flush privileges;

yum --enablerepo=centos-openstack-rocky,epel -y install openstack-glance


vi /etc/glance/glance-api.conf
mv /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.org
vi /etc/glance/glance-registry.conf
chmod 640 /etc/glance/glance-api.conf /etc/glance/glance-registry.conf;
chown root:glance /etc/glance/glance-api.conf /etc/glance/glance-registry.conf;
vi /etc/glance/glance-api.conf
su -s /bin/bash glance -c "glance-manage db_sync";
systemctl start openstack-glance-api openstack-glance-registry;
systemctl enable openstack-glance-api openstack-glance-registry

== network node NFS share

yum install nfs-utils
systemctl enable rpcbind
systemctl enable nfs-server
systemctl enable nfs-lock
systemctl enable nfs-idmap
systemctl start rpcbind
systemctl start nfs-server
systemctl start nfs-lock
systemctl start nfs-idmap
mkdir /home/nfsshare
chmod -R 755 /home/nfsshare
chown nfsnobody:nfsnobody  /home/nfsshare
vi /etc/exports
    /home/nfsshare            *(rw,sync,no_root_squash,no_all_squash)
systemctl restart nfs-server
showmount -e

== control1 -> 3

echo "network2:/home/nfsshare             /shared   nfs defaults 0 0" >> /etc/fstab
mkdir /shared
mount /shared


 =================================================== Nova  ==============================================================

openstack endpoint create --region RegionOne compute public http://vip:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute internal http://vip:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute admin http://vip:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region RegionOne placement public http://vip:8778;
openstack endpoint create --region RegionOne placement internal http://vip:8778;
openstack endpoint create --region RegionOne placement admin http://vip:8778

create database nova;
grant all privileges on nova.* to nova@'localhost' identified by 'password';
grant all privileges on nova.* to nova@'%' identified by 'password';
create database nova_api;
grant all privileges on nova_api.* to nova@'localhost' identified by 'password';
grant all privileges on nova_api.* to nova@'%' identified by 'password';
create database nova_placement;
grant all privileges on nova_placement.* to nova@'localhost' identified by 'password';
grant all privileges on nova_placement.* to nova@'%' identified by 'password';
create database nova_cell0;
grant all privileges on nova_cell0.* to nova@'localhost' identified by 'password';
grant all privileges on nova_cell0.* to nova@'%' identified by 'password';
flush privileges;
SHOW GLOBAL STATUS LIKE 'wsrep_last%';


yum --enablerepo=centos-openstack-rocky,epel -y install openstack-nova
mv /etc/nova/nova.conf /etc/nova/nova.conf.org
vi /etc/nova/nova.conf
chmod 640 /etc/nova/nova.conf;
chgrp nova /etc/nova/nova.conf




vi /etc/httpd/conf.d/00-nova-placement-api.conf
# add near line 15

  <Directory /usr/bin>
    Require all granted
  </Directory>

</VirtualHost>


su -s /bin/bash nova -c "nova-manage api_db sync"
su -s /bin/bash nova -c "nova-manage cell_v2 map_cell0";
su -s /bin/bash nova -c "nova-manage db sync";
su -s /bin/bash nova -c "nova-manage cell_v2 create_cell --name cell1";
systemctl restart httpd

chown nova. /var/log/nova/nova-placement-api.log
# Start and enable all nova control-plane services.
for svc in api consoleauth conductor scheduler novncproxy; do
  systemctl start "openstack-nova-${svc}"
  systemctl enable "openstack-nova-${svc}"
done
# show status
for svc in api consoleauth conductor scheduler novncproxy; do
  systemctl status "openstack-nova-${svc}"
done

# restart
for svc in api consoleauth conductor scheduler novncproxy; do
  systemctl restart "openstack-nova-${svc}"
done

for svc in api consoleauth conductor scheduler novncproxy; do
  systemctl stop "openstack-nova-${svc}"
done

for svc in api consoleauth conductor scheduler novncproxy; do
  systemctl start "openstack-nova-${svc}"
done


[root@dlp ~(keystone)]# openstack compute service list




DELETE FROM compute_node_stats WHERE compute_node_id='2';


openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_host vip;
openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_userid openstack;
openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_password password

=================================================== Neutron  ==============================================================

openstack user create --domain default --project service --password servicepassword neutron;
openstack role add --project service --user neutron admin;
openstack service create --name neutron --description "OpenStack Networking service" network;
openstack endpoint create --region RegionOne network public http://vip:9696;
openstack endpoint create --region RegionOne network internal http://vip:9696;
openstack endpoint create --region RegionOne network admin http://vip:9696

mysql -u root -p
create database neutron_ml2;
grant all privileges on neutron_ml2.* to neutron@'localhost' identified by 'password';
grant all privileges on neutron_ml2.* to neutron@'%' identified by 'password';
flush privileges;
exit


------------+---------------------------+---------------------------+------------
            |                           |                           |
        eno1|vip              eno1|10.1.17.15              eno1|10.1.17.17
+-----------+-----------+   +-----------+-----------+   +-----------+-----------+
|    [ Control Node ]   |   |    [ Network Node ]   |   |    [ Compute Node ]   |
|                       |   |                       |   |                       |
|  MariaDB    RabbitMQ  |   |      Open vSwitch     |   |        Libvirt        |
|  Memcached  httpd     |   |        L2 Agent       |   |     Nova Compute      |
|  Keystone   Glance    |   |        L3 Agent       |   |      Open vSwitch     |
|  Nova API             |   |                       |   |        L2 Agent       |
|  Neutron Server       |   |                       |   |    Metadata Agent     |
|                       |   |                       |   |      DHCP Agent       |
+-----------------------+   +-----------------------+   +-----------------------+

 yum --enablerepo=centos-openstack-rocky,epel -y install openstack-neutron openstack-neutron-ml2

========================== server ==========================

mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf.org
vi /etc/neutron/neutron.conf
chmod 640 /etc/neutron/neutron.conf
chgrp neutron /etc/neutron/neutron.conf

mv /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.org
vi /etc/neutron/plugins/ml2/ml2_conf.ini
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini


vi /etc/nova/nova.conf 
#(17)
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/bash neutron -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head"
systemctl start neutron-server neutron-metadata-agent
systemctl enable neutron-server neutron-metadata-agent
systemctl restart openstack-nova-api

========================== compute2  ==========================

# --- Compute node (compute2): neutron agents colocated with nova-compute ---
yum --enablerepo=centos-openstack-rocky,epel -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
# Preserve packaged defaults as .org before writing fresh configs.
mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf.org
mv /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.org
mv /etc/neutron/plugins/ml2/openvswitch_agent.ini /etc/neutron/plugins/ml2/openvswitch_agent.ini.org
mv /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.org
mv /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.org

# neutron.conf holds credentials: restrict to root:neutron.
vi /etc/neutron/neutron.conf
chmod 640 /etc/neutron/neutron.conf
chgrp neutron /etc/neutron/neutron.conf

# Edit plugin/agent configs and the nova sections "(17)" and "(19)" of these notes.
vi /etc/neutron/plugins/ml2/ml2_conf.ini
vi /etc/neutron/plugins/ml2/openvswitch_agent.ini
vi /etc/nova/nova.conf
#(17)
#(19)

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
systemctl start openvswitch
systemctl enable openvswitch
# br-int is the OVS integration bridge the L2 agent wires VM ports into.
ovs-vsctl add-br br-int
# Restart nova-compute so it reads the new neutron settings.
systemctl restart openstack-nova-compute
systemctl start neutron-openvswitch-agent
systemctl enable neutron-openvswitch-agent
# Seed the dhcp/metadata agent configs from the saved originals, keeping only
# effective (non-comment, non-blank) lines; grep reads the file directly
# instead of a useless 'cat |' pipeline.
grep -v "#" /etc/neutron/dhcp_agent.ini.org | grep . >> /etc/neutron/dhcp_agent.ini
grep -v "#" /etc/neutron/metadata_agent.ini.org | grep . >> /etc/neutron/metadata_agent.ini


# Neutron agents managed on the compute node (single source of truth instead
# of repeating the list three times; expansions quoted; stray leading
# whitespace removed).
neutron_services="dhcp-agent metadata-agent openvswitch-agent"

# Start now and enable at boot.
for service in $neutron_services; do
  systemctl start "neutron-${service}"
  systemctl enable "neutron-${service}"
done

# Restart so every agent rereads the edited configs.
for service in $neutron_services; do
  systemctl restart "neutron-${service}"
done

# Verify all agents are active.
for service in $neutron_services; do
  systemctl status "neutron-${service}"
done

========================== network2  ==========================

# --- Network node (network2): L3 routing + OVS L2 agent ---
yum --enablerepo=centos-openstack-rocky,epel -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch

# Preserve packaged defaults as .org before writing fresh configs.
mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf.org
mv /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.org
mv /etc/neutron/plugins/ml2/openvswitch_agent.ini /etc/neutron/plugins/ml2/openvswitch_agent.ini.org
mv /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.org
cat /etc/neutron/l3_agent.ini.org | grep -v "#" | grep . >>  /etc/neutron/l3_agent.ini

vi /etc/neutron/neutron.conf
vi /etc/neutron/plugins/ml2/ml2_conf.ini
vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

# External bridge: br-eno2 carries provider/external traffic out via NIC eno2.
ovs-vsctl add-br br-eno2
ovs-vsctl add-port br-eno2 eno2


# Start and enable the L3 router agent and the OVS L2 agent.
for service in l3-agent openvswitch-agent; do
systemctl start neutron-$service
systemctl enable neutron-$service
done

# Restart so both agents reread the edited configs.
for service in l3-agent openvswitch-agent; do
systemctl restart neutron-$service
done

========================== network3  ==========================

# --- Network node (network3): same layout as network2 ---
yum --enablerepo=centos-openstack-rocky,epel -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch

# Preserve packaged defaults as .org before writing fresh configs.
mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf.org
mv /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.org
mv /etc/neutron/plugins/ml2/openvswitch_agent.ini /etc/neutron/plugins/ml2/openvswitch_agent.ini.org
mv /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.org
cat /etc/neutron/l3_agent.ini.org | grep -v "#" | grep . >>  /etc/neutron/l3_agent.ini

vi /etc/neutron/neutron.conf
vi /etc/neutron/l3_agent.ini
vi /etc/neutron/plugins/ml2/ml2_conf.ini
vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

# External bridge: br-eno2 carries provider/external traffic out via NIC eno2.
ovs-vsctl add-br br-eno2
ovs-vsctl add-port br-eno2 eno2


# Start and enable the L3 router agent and the OVS L2 agent.
for service in l3-agent openvswitch-agent; do
systemctl start neutron-$service
systemctl enable neutron-$service
done

# Restart so both agents reread the edited configs.
for service in l3-agent openvswitch-agent; do
systemctl restart neutron-$service
done
=================================================== Metadata  ==============================================================

ip netns exec qdhcp-aa3f480d-f4c9-4a41-aedd-30ae275ef371 tcpdump -i tap84cb0ed2-80
ip netns exec qdhcp-aa3f480d-f4c9-4a41-aedd-30ae275ef371 netstat -anp

ip netns exec qrouter-2b661bf4-4e5c-4239-8918-397e260ed85d iptables -L -t nat | grep 169
REDIRECT   tcp  --  anywhere             169.254.169.254      tcp dpt:http redir ports 9697

ip netns exec qrouter-2b661bf4-4e5c-4239-8918-397e260ed85d netstat -anp
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name   
tcp        0      0 0.0.0.0:9697            0.0.0.0:*               LISTEN      30888/haproxy      
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags       Type       State         I-Node   PID/Program name     Path
unix  2      [ ]         DGRAM                    659086   30888/haproxy       


[root@network2 ~]# ps -f --pid 30888 | fold -s -w 90
UID        PID  PPID  C STIME TTY          TIME CMD
neutron  30888     1  0 Jun25 ?        00:00:01 haproxy -f
/var/lib/neutron/ns-metadata-proxy/2b661bf4-4e5c-4239-8918-397e260ed85d.conf


[root@network2 ~]# cat /var/lib/neutron/ns-metadata-proxy/2b661bf4-4e5c-4239-8918-397e260ed85d.conf

global
    log         /dev/log local0 info
    log-tag     haproxy-metadata-proxy-2b661bf4-4e5c-4239-8918-397e260ed85d
    user        neutron
    group       neutron
    maxconn     1024
    pidfile     /var/lib/neutron/external/pids/2b661bf4-4e5c-4239-8918-397e260ed85d.pid
    daemon

defaults
    log global
    mode http
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor
    retries                 3
    timeout http-request    30s
    timeout connect         30s
    timeout client          32s
    timeout server          32s
    timeout http-keep-alive 30s

listen listener
    bind 0.0.0.0:9697
    server metadata /var/lib/neutron/metadata_proxy
    http-request add-header X-Neutron-Router-ID 2b661bf4-4e5c-4239-8918-397e260ed85d
   



 curl http://vip:8775


 cat /var/lib/neutron/dhcp/b99ed8b1-7c68-4516-a3ed-def5465f63e0/opts
tag:tag0,option:dns-server,10.1.17.15
tag:tag0,option:classless-static-route,169.254.169.254/32,192.168.102.1,0.0.0.0/0,192.168.102.1
tag:tag0,249,169.254.169.254/32,192.168.102.1,0.0.0.0/0,192.168.102.1
tag:tag0,option:router,192.168.102.1

=
=
=
=
=
=
# Initially the VM gets its IP from DHCP, which pushes the following options:
tag:tag0,option:classless-static-route,
169.254.169.254/32,192.168.126.80,
0.0.0.0/0,192.168.126.1
tag:tag0,249,169.254.169.254/32,192.168.126.80,0.0.0.0/0,192.168.126.1
tag:tag0,option:router,192.168.126.1

# route: traffic to the metadata IP 169.254.169.254 goes via next-hop 192.168.126.80

[centos@vlan126-centos-1 ~]$ netstat -rn
Kernel IP routing table
Destination     Gateway         Genmask         Flags   MSS Window  irtt Iface
0.0.0.0         192.168.126.1   0.0.0.0         UG        0 0          0 eth0
169.254.169.254 192.168.126.80  255.255.255.255 UGH       0 0          0 eth0
192.168.126.0   0.0.0.0         255.255.255.0   U         0 0          0 eth0


# proxy IP: 192.168.126.80
[root@compute3 ns-metadata-proxy]# cat 61aa4820-74b2-44dc-9228-176f23ad471d.conf

global
    log         /dev/log local0 info
    log-tag     haproxy-metadata-proxy-61aa4820-74b2-44dc-9228-176f23ad471d
    user        neutron
    group       neutron
    maxconn     1024
    pidfile     /var/lib/neutron/external/pids/61aa4820-74b2-44dc-9228-176f23ad471d.pid
    daemon

defaults
    log global
    mode http
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor
    retries                 3
    timeout http-request    30s
    timeout connect         30s
    timeout client          32s
    timeout server          32s
    timeout http-keep-alive 30s

listen listener
    bind 169.254.169.254:80
    server metadata /var/lib/neutron/metadata_proxy
    http-request add-header X-Neutron-Network-ID 61aa4820-74b2-44dc-9228-176f23ad471d

[root@compute3 ns-metadata-proxy]# file  /var/lib/neutron/metadata_proxy
/var/lib/neutron/metadata_proxy: socket

=================================================== Horizon  ==============================================================

# --- Horizon dashboard (control node) ---
yum --enablerepo=centos-openstack-rocky,epel -y install openstack-dashboard
cp /etc/openstack-dashboard/local_settings /etc/openstack-dashboard/local_settings.org;
# NOTE(review): because the line above uses 'cp' (not 'mv' as elsewhere in
# these notes), this append duplicates every effective line back into
# local_settings — verify intent.
cat /etc/openstack-dashboard/local_settings.org  |grep -v "#" | grep . >> /etc/openstack-dashboard/local_settings
# Watch memcached hit counters to confirm the dashboard session cache is used.
watch -d -n 1 'memcached-tool 127.0.0.1:11211 stats |grep get_hits'
vi /etc/openstack-dashboard/local_settings
vi /etc/httpd/conf.d/openstack-dashboard.conf
 # line 4: add

# Run the dashboard under its own WSGI daemon process group.
WSGIDaemonProcess dashboard
WSGIProcessGroup dashboard
WSGISocketPrefix run/wsgi
WSGIApplicationGroup %{GLOBAL}

systemctl restart httpd


=================================================== Cinder  ==============================================================


# --- Cinder (Block Storage) control-plane setup ---
# Service user, admin role, service entity, and v3 endpoints behind the HA VIP.
openstack user create --domain default --project service --password servicepassword cinder;
openstack role add --project service --user cinder admin;
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3;
openstack endpoint create --region RegionOne volumev3 public http://vip:8776/v3/%\(tenant_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://vip:8776/v3/%\(tenant_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://vip:8776/v3/%\(tenant_id\)s

# Create the cinder database and grants (the following lines are typed inside
# the interactive mysql session).
mysql -u root -p
create database cinder;
grant all privileges on cinder.* to cinder@'localhost' identified by 'password';
grant all privileges on cinder.* to cinder@'%' identified by 'password';
flush privileges;
exit

# Install cinder and write a fresh cinder.conf (packaged file kept as .org).
yum --enablerepo=centos-openstack-rocky,epel -y install openstack-cinder
mv /etc/cinder/cinder.conf /etc/cinder/cinder.conf.org
vi /etc/cinder/cinder.conf


[DEFAULT]
my_ip = 10.1.17.101
log_dir = /var/log/cinder
state_path = /var/lib/cinder
auth_strategy = keystone

transport_url = rabbit://openstack:password@vip
enable_v3_api = True

[database]
connection = mysql+pymysql://cinder:password@vip/cinder

[keystone_authtoken]
www_authenticate_uri = http://vip:5000
auth_url = http://vip:5000
memcached_servers = control1:11211,control2:11211,control3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = servicepassword

[oslo_concurrency]
lock_path = $state_path/tmp

[root@dlp ~(keystone)]# chmod 640 /etc/cinder/cinder.conf
[root@dlp ~(keystone)]# chgrp cinder /etc/cinder/cinder.conf
[root@dlp ~(keystone)]# su -s /bin/bash cinder -c "cinder-manage db sync"
[root@dlp ~(keystone)]# systemctl start openstack-cinder-api openstack-cinder-scheduler
[root@dlp ~(keystone)]# systemctl enable openstack-cinder-api openstack-cinder-scheduler
[root@dlp ~(keystone)]# echo "export OS_VOLUME_API_VERSION=3" >> ~/keystonerc
[root@dlp ~(keystone)]# source ~/keystonerc
[root@dlp ~(keystone)]# openstack volume service list

# Network2 - Storage Node
[root@network3 ~]# cat /etc/cinder/cinder.conf
enabled_backends=lvmdriver-1,lvmdriver-2
#28
backup_driver = cinder.backup.drivers.nfs.NFSBackupDriver
backup_mount_point_base = $state_path/backup_nfs
backup_share = network2:/home/cinder-backup

[lvmdriver-1]
volume_group=vg_1
volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name=big_vg
target_helper = lioadm
target_protocol = iscsi
[lvmdriver-2]
volume_group=vg_2
volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name=big_vg
target_helper = lioadm
target_protocol = iscsi

[root@control1 ceph(keystone)]# openstack volume type list
+--------------------------------------+--------------------+-----------+
| ID                                   | Name               | Is Public |
+--------------------------------------+--------------------+-----------+
| 2f8db0d9-7401-4c2a-ba28-36279d8476eb | volume_from_big_vg | True      |
+--------------------------------------+--------------------+-----------+
[root@control1 ceph(keystone)]# openstack volume type show volume_from_big_vg
+--------------------+--------------------------------------+
| Field              | Value                                |
+--------------------+--------------------------------------+
| access_project_ids | None                                 |
| description        | None                                 |
| id                 | 2f8db0d9-7401-4c2a-ba28-36279d8476eb |
| is_public          | True                                 |
| name               | volume_from_big_vg                   |
| properties         | volume_backend_name='big_vg'         |
| qos_specs_id       | None                                 |
+--------------------+--------------------------------------+

[root@control1 ceph(keystone)]# openstack volume service list
+------------------+----------------------+------+---------+-------+----------------------------+
| Binary           | Host                 | Zone | Status  | State | Updated At                 |
+------------------+----------------------+------+---------+-------+----------------------------+
| cinder-scheduler | control1             | nova | enabled | up    | 2019-07-31T10:51:22.000000 |
| cinder-scheduler | control2             | nova | enabled | up    | 2019-07-31T10:51:28.000000 |
| cinder-scheduler | control3             | nova | enabled | up    | 2019-07-31T10:51:22.000000 |
| cinder-volume    | network3@lvmdriver-1 | nova | enabled | down  | 2019-07-31T10:53:51.000000 |
| cinder-volume    | network3@lvmdriver-2 | nova | enabled | down  | 2019-07-31T10:53:53.000000 |
| cinder-volume    | network3@lvmdriver-3 | nova | enabled | down  | 2019-07-17T07:30:44.000000 |
| cinder-backup    | network3             | nova | enabled | down  | 2019-07-31T10:53:52.000000 |
+------------------+----------------------+------+---------+-------+----------------------------+

 openstack volume type create --property volume_backend_name='ceph'  volume_from_ceph

========================== add node compute3 : DVR + Compute ==========================

compute2:
# Reuse compute2's known-good configs on the new node.
scp /etc/nova/nova.conf compute3:/etc/nova/nova.conf;
scp /etc/neutron/neutron.conf compute3:/etc/neutron/neutron.conf;
scp /etc/neutron/plugins/ml2/ml2_conf.ini compute3:/etc/neutron/plugins/ml2/ml2_conf.ini;
scp /etc/neutron/plugins/ml2/openvswitch_agent.ini compute3:/etc/neutron/plugins/ml2/openvswitch_agent.ini;
scp /etc/neutron/dhcp_agent.ini compute3:/etc/neutron/dhcp_agent.ini;
scp /etc/neutron/metadata_agent.ini compute3:/etc/neutron/metadata_agent.ini
network2:
# The L3 agent config comes from a network node (it carries the DVR settings).
scp  /etc/neutron/l3_agent.ini compute3:/etc/neutron/l3_agent.ini
# On compute3: configs hold credentials, so group-own them to neutron.
chown root:neutron /etc/neutron/neutron.conf /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/openvswitch_agent.ini /etc/neutron/dhcp_agent.ini /etc/neutron/l3_agent.ini



# Agents to run on a DVR-enabled compute node (single list instead of three
# repeated hard-coded copies; expansions quoted; stray leading space removed).
dvr_services="l3-agent dhcp-agent metadata-agent openvswitch-agent"

# Start now and enable at boot.
for service in $dvr_services; do
  systemctl start "neutron-${service}"
  systemctl enable "neutron-${service}"
done

# Restart so every agent rereads the copied configs.
for service in $dvr_services; do
  systemctl restart "neutron-${service}"
done

# Verify every agent is active.
for service in $dvr_services; do
  systemctl status "neutron-${service}"
done


==========================  network2 : LBaaSv2  ==========================



openstack floating ip create pro_vlan111
openstack server add floating ip net1_inst2 10.1.17.169

[1]     On Control Node, Change settings like follows.
# install from Rocky, EPEL

[root@dlp ~(keystone)]# yum --enablerepo=centos-openstack-rocky,epel -y install openstack-neutron-lbaas net-tools
[root@dlp ~(keystone)]# vi /etc/neutron/neutron.conf
# add to the service_plugins option in the [DEFAULT] section

service_plugins = router,lbaasv2
[root@dlp ~(keystone)]# vi /etc/neutron/neutron_lbaas.conf
# line 207: add

[service_providers]
service_provider = LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default

[root@dlp ~(keystone)]# vi /etc/neutron/lbaas_agent.ini
# add into [DEFAULT] section

[DEFAULT]
interface_driver = openvswitch
[root@dlp ~(keystone)]# su -s /bin/bash neutron -c "neutron-db-manage --subproject neutron-lbaas --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head"

[root@dlp ~(keystone)]# systemctl restart neutron-server

[2]     On Network Node and Compute Node, Change settings like follows.
# install from Rocky, EPEL

[root@network ~]# yum --enablerepo=centos-openstack-rocky,epel -y install openstack-neutron-lbaas haproxy net-tools
[root@network ~]# vi /etc/neutron/neutron.conf
# add to the service_plugins option in the [DEFAULT] section

service_plugins = router,lbaasv2
[root@network ~]# vi /etc/neutron/neutron_lbaas.conf
# line 207: add

[service_providers]
service_provider = LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default

[root@network ~]# vi /etc/neutron/lbaas_agent.ini
# add into [DEFAULT] section

[DEFAULT]
interface_driver = openvswitch
[root@network ~]# systemctl start neutron-lbaasv2-agent
[root@network ~]# systemctl enable neutron-lbaasv2-agent


neutron lbaas-loadbalancer-create --name lb01 int_net1_sub1
neutron lbaas-loadbalancer-show lb01
+---------------------+--------------------------------------+
| Field               | Value                                |
+---------------------+--------------------------------------+
| admin_state_up      | True                                 |
| description         |                                      |
| id                  | 06473beb-8026-4349-8a9b-f9559a5576b0 |
| listeners           |                                      |
| name                | lb01                                 |
| operating_status    | ONLINE                               |
| pools               |                                      |
| provider            | haproxy                              |
| provisioning_status | ACTIVE                               |
| tenant_id           | f9e2fdffdca54bf1a4e554de73781ab6     |
| vip_address         | 192.168.100.12                       |
| vip_port_id         | f04d88be-622b-405b-8550-0ccf55bcc628 |
| vip_subnet_id       | ff418fde-2a89-4007-a71d-20bea2257d01 |
+---------------------+--------------------------------------+
openstack port set --security-group group_web f04d88be-622b-405b-8550-0ccf55bcc628
neutron lbaas-listener-create --name lb01-http --loadbalancer lb01 --protocol HTTP --protocol-port 80
neutron lbaas-pool-create --name lb01-http-pool --lb-algorithm ROUND_ROBIN --listener lb01-http --protocol HTTP

[root@control1 shared(keystone)]# openstack server list
+--------------------------------------+------------------+---------+-------------------------------------+---------+--------+
| ID                                   | Name             | Status  | Networks                            | Image   | Flavor |
+--------------------------------------+------------------+---------+-------------------------------------+---------+--------+
| 2794818b-d28d-4fc9-8dc6-820c582283fe | web_net1_inst_-1 | ACTIVE  | int_net1=192.168.100.7              |         | small  |
| a1329000-dc80-47d5-9c83-e7eee814d0a0 | web_net1_inst_-2 | ACTIVE  | int_net1=192.168.100.10             |         | small  |

echo `hostname`> /var/www/html/index.html; cat /var/www/html/index.html
systemctl start httpd
systemctl enable httpd

neutron lbaas-member-create --name lb01-member-01 --subnet int_net1_sub1 --address 192.168.100.3 --protocol-port 80 lb01-http-pool;
neutron lbaas-member-create --name lb01-member-02 --subnet int_net1_sub1 --address 192.168.100.10 --protocol-port 80 lb01-http-pool;
neutron lbaas-member-list lb01-http-pool
(
neutron lbaas-member-delete lb01-member-02 lb01-http-pool
)
openstack floating ip create pro_vlan111
+---------------------+--------------------------------------+
| Field               | Value                                |
+---------------------+--------------------------------------+
| created_at          | 2019-07-24T11:54:12Z                 |
| description         |                                      |
| dns_domain          | None                                 |
| dns_name            | None                                 |
| fixed_ip_address    | None                                 |
| floating_ip_address | 10.1.17.182                          |
| floating_network_id | aa3f480d-f4c9-4a41-aedd-30ae275ef371 |
| id                  | 269d368a-a3ff-4706-8585-2038aad4b2bc |
| name                | 10.1.17.182                          |
| port_details        | None                                 |
| port_id             | None                                 |
| project_id          | f9e2fdffdca54bf1a4e554de73781ab6     |
| qos_policy_id       | None                                 |
| revision_number     | 0                                    |
| router_id           | None                                 |
| status              | DOWN                                 |
| subnet_id           | None                                 |
| tags                | []                                   |
| updated_at          | 2019-07-24T11:54:12Z                 |

neutron floatingip-associate 269d368a-a3ff-4706-8585-2038aad4b2bc f04d88be-622b-405b-8550-0ccf55bcc628

https://www.rdoproject.org/networking/lbaas/

yum -y install openstack-neutron-lbaas openstack-neutron-lbaas-ui
systemctl restart httpd memcached

=== delete

# Teardown goes child-first: pool (after its members) -> listener ->
# loadbalancer. The *-delete commands take the name/ID as a positional
# argument; they do not accept a --name option.
neutron lbaas-pool-delete lb01-http-pool
neutron lbaas-listener-delete lb01-http
neutron lbaas-loadbalancer-delete lb01

==========================  network2 : FWaaSv1 ==========================

https://techopenstack.wordpress.com/2016/10/06/lbaasfwaas-in-openstack/
yum install openstack-neutron-fwaas

# network
vi /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router,lbaasv2,firewall

vi /etc/neutron/fwaas_driver.ini
[fwaas]
driver = iptables
enabled = True
[service_providers]
service_provider = LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default

# control
vi /etc/openstack-dashboard/local_settings
service neutron-l3-agent restart;
service neutron-server restart;
service httpd restart


yum --enablerepo=centos-openstack-rocky,epel -y install python-pip

# Tham khao
# Linux security group 1 ( Linux )
openstack security group rule create group1 --protocol tcp --dst-port 22:22 --remote-ip 0.0.0.0/0
openstack security group rule create group1 --protocol icmp --remote-ip 0.0.0.0/0

# Windows security group 2 ( Windows )
openstack security group rule create group2 --protocol udp --dst-port 3389:3389 --remote-ip 0.0.0.0/0
openstack security group rule create group2 --protocol tcp --dst-port 3389:3389 --remote-ip 0.0.0.0/0
openstack security group rule create group2 --protocol icmp --remote-ip 0.0.0.0/0

# Linux security group  ( Linux )
openstack security group create group_web
openstack security group rule create group_web --protocol tcp --dst-port 80:80 --remote-ip 0.0.0.0/0
openstack security group rule create group_web --protocol tcp --dst-port 443:443 --remote-ip 0.0.0.0/0



firewall-create                    Create a firewall.
firewall-delete                    Delete a given firewall.
firewall-list                      List firewalls that belong to a given tenant.
firewall-policy-create             Create a firewall policy.
firewall-policy-delete             Delete a given firewall policy.
firewall-policy-insert-rule        Insert a rule into a given firewall policy.
firewall-policy-list               List firewall policies that belong to a given tenant.
firewall-policy-remove-rule        Remove a rule from a given firewall policy.
firewall-policy-show               Show information of a given firewall policy.
firewall-policy-update             Update a given firewall policy.
firewall-rule-create               Create a firewall rule.
firewall-rule-delete               Delete a given firewall rule.
firewall-rule-list                 List firewall rules that belong to a given tenant.
firewall-rule-show                 Show information of a given firewall rule.
firewall-rule-update               Update a given firewall rule.
firewall-show                      Show inf
firewall-update
 
rules(N) --> policy(1) --> fw(1)


neutron firewall-rule-create --protocol tcp --destination-port 22:22 --action allow
neutron firewall-policy-create --firewall-rules 59b5e9fa-c116-4bb0-bffb-c00ab1d4d2e7 allow_webserver # Policy 05202ee2-3441-4726-8978-c4f8820b89e6

neutron firewall-rule-create --protocol tcp --destination-port 443:443 --action allow
neutron firewall-policy-insert-rule allow_webserver 59dbf017-4156-4314-afe7-407b9a1a1f5b

# create FW
neutron firewall-create 05202ee2-3441-4726-8978-c4f8820b89e6

# Test web server die, no ping
neutron firewall-rule-create --protocol icmp --action allow
neutron firewall-policy-insert-rule allow_webserver 4c2671a4-9faa-456c-ab36-83934a80d27c

# test web ok
neutron firewall-rule-create --protocol tcp --destination-port 80:80 --action allow
neutron firewall-policy-insert-rule allow_webserver 3ef86d32-2021-48fd-abe6-c599cb262cef
neutron firewall-policy-remove-rule allow_webserver 3ef86d32-2021-48fd-abe6-c599cb262cef

[root@control1 neutron(keystone)]# neutron firewall-list
neutron CLI is deprecated and will be removed in the future. Use openstack CLI instead.
+--------------------------------------+------+----------------------------------+--------------------------------------+
| id                                   | name | tenant_id                        | firewall_policy_id                   |
+--------------------------------------+------+----------------------------------+--------------------------------------+
| e85a48f0-3140-43b9-9d66-bce39a2dabd7 |      | f9e2fdffdca54bf1a4e554de73781ab6 | 05202ee2-3441-4726-8978-c4f8820b89e6 |
+--------------------------------------+------+----------------------------------+--------------------------------------+
[root@control1 neutron(keystone)]#
[root@control1 neutron(keystone)]#
[root@control1 neutron(keystone)]#
[root@control1 neutron(keystone)]# neutron firewall-show e85a48f0-3140-43b9-9d66-bce39a2dabd7
neutron CLI is deprecated and will be removed in the future. Use openstack CLI instead.
+--------------------+--------------------------------------+
| Field              | Value                                |
+--------------------+--------------------------------------+
| admin_state_up     | True                                 |
| description        |                                      |
| firewall_policy_id | 05202ee2-3441-4726-8978-c4f8820b89e6 |
| id                 | e85a48f0-3140-43b9-9d66-bce39a2dabd7 |
| name               |                                      |
| project_id         | f9e2fdffdca54bf1a4e554de73781ab6     |
| router_ids         | 2b661bf4-4e5c-4239-8918-397e260ed85d |
|                    | 64b37d2d-e3a3-497e-98b6-89c6b3756c30 |
|                    | e4790aff-3ce4-4854-9123-a740e276407a |
| status             | ACTIVE                               |
| tenant_id          | f9e2fdffdca54bf1a4e554de73781ab6     |
+--------------------+--------------------------------------+

POLICY_FILES['neutron-fwaas'] = /root/neutron-fwaas-dashboard-stable-rocky/etc/neutron-fwaas-policy.json

==========================  packstack : FWaaSv2 ==========================

https://docs.openstack.org/neutron-fwaas-dashboard/latest/install/index.html
git clone https://opendev.org/openstack/neutron-fwaas-dashboard
cd neutron-fwaas-dashboard
sudo pip install .

# control/network node
 cat /etc/neutron/neutron.conf
[DEFAULT]
service_plugins = firewall_v2
[service_providers]
service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default
[fwaas]
agent_version = v2
driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas_v2.IptablesFwaasDriver
enabled = True

[root@control1 ~(keystone)]# cat /etc/neutron/fwaas_driver.ini
[DEFAULT]

cat /etc/neutron/fwaas_driver.ini
[fwaas]
agent_version = v2
driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas_v2.IptablesFwaasDriver
enabled = True
firewall_l2_driver = noop

cat /etc/neutron/l3_agent.ini
[DEFAULT]
[agent]
extensions = fwaas_v2
[ovs]