Sunday, 30 June 2019

KVM POC


Contents


Introduction

  KVM POC



I.             Test Case Details

1.      Network OS

A. Test1: LACP

a.       Tham chiếu

b.       Configure Team Redhat 7

 
 
 
[root@kvm_node1 network-scripts]# cat ifcfg-eno1
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
NAME=eno1
UUID=dcce8529-2f43-4514-8777-19037e69a633
DEVICE=eno1
ONBOOT=yes
DEVICETYPE=TeamPort
TEAM_MASTER=team0
TEAM_PORT_CONFIG='{"prio":9}'
 
[root@kvm_node1 network-scripts]# cat ifcfg-eno2
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=dhcp
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
NAME=eno2
UUID=d4a6c849-7665-454b-9912-f7053b9d3e08
DEVICE=eno2
ONBOOT=yes
DEVICETYPE=TeamPort
TEAM_MASTER=team0
TEAM_PORT_CONFIG='{"prio":10}'
 
[root@kvm_node1 network-scripts]# cat ifcfg-team0 
DEVICE=team0  
PROXY_METHOD=none  
BROWSER_ONLY=no  
BOOTPROTO=none  
IPADDR=10.1.29.10
PREFIX=24  
GATEWAY=10.1.0.1
DEFROUTE=yes  
IPV4_FAILURE_FATAL=no  
IPV6INIT=yes  
NAME=team0  
ONBOOT=yes  
DEVICETYPE=Team  
TEAM_CONFIG="{\"runner\": {\"name\": \"lacp\", \"active\": true, \"fast_rate\": true, \"tx_hash\": [\"en\", \"ipv4\", \"ipv6\"]},\"link_watch\":    {\"name\": \"ethtool\"}}"  
 
 
[root@kvm_node1 ~]# teamdctl team0 state
setup:
  runner: lacp
ports:
  eno1
    link watches:
      link summary: up
      instance[link_watch_0]:
        name: ethtool
        link: up
        down count: 0
    runner:
      aggregator ID: 2, Selected
      selected: yes
      state: current
  eno2
    link watches:
      link summary: up
      instance[link_watch_0]:
        name: ethtool
        link: up
        down count: 0
    runner:
      aggregator ID: 2, Selected
      selected: yes
      state: current
runner:
  active: yes
  fast rate: yes
 
 

c.        Configure LACP trên SW

 
set interfaces ge-0/0/6 ether-options 802.3ad ae0
set interfaces ge-0/0/7 ether-options 802.3ad ae0
set interfaces ae0 aggregated-ether-options lacp active
set interfaces ae0 unit 0 family ethernet-switching port-mode access
set interfaces ae0 unit 0 family ethernet-switching vlan members 111

B. Test2: Performance


set interfaces ae0 unit 0 family ethernet-switching port-mode access
set interfaces ae0 unit 0 family ethernet-switching vlan members 111

2.      Network VLAN KVM


Chú ý:
- Nên tắt NetworkManager vì nó lưu UUID của các eth device có thể gây lỗi khi start network.
- eno3.126 và macvtap bị xung đột nên chỉ có thể tạo 1 trong 2 cách

A. Test1: Tag VLAN trên VM

a.       Tham chiếu http://alesnosek.com/blog/2015/09/07/bridging-vlan-trunk-to-the-guest/

Dựa vào link cấu hình Trunk ở trên port Data cho các VM. Tạo Bridge cho KVM. Tag VLAN trên VM.
Tạo VM trên openstack IP 192.168.126.101 VLAN126 và VM trên KVM IP 192.168.126.2

b.       Tạo VM trên openstack

 
[root@controller ~(keystone)]# openstack network list 
+--------------------------------------+----------------------------------------------------+--------------------------------------+
| ID                                   | Name                                               | Subnets                              |
+--------------------------------------+----------------------------------------------------+--------------------------------------+
| 03ed80d6-f4dd-42e3-be12-7aae1c3b1086 | pro_dom1_net1                                      |                                      |
| 33bc8831-a864-4431-8956-15e1906ca51b | int_net4                                           | 0ef1a44f-f097-41b5-a53c-c2a3a2775501 |
| 43fd589f-025d-47b4-afbe-0992291e369d | int_net3                                           | 7d01455c-cc79-4f4f-9deb-f96d46f95d5a |
| b791752a-4b20-4b4f-ae00-017d7e0b78c5 | pro_net1                                           | 04eb5856-f1b4-4a0e-9c64-0efe30e0b867 |
| ccdbdce6-ac26-44f9-9999-2cae549661e9 | pro_net2                                           | 6c0eb4f7-b1de-43d1-9e77-8f6710e79b6b |
| ddeeff0b-0d95-4e66-b238-6b52e811de17 | pro_dom1_project1_net1                             |                                      |
| f380b652-9a34-43de-bbab-8eec0763f229 | int_net2                                           | ccd54129-3679-4afe-9a8c-b1691c571e08 |
| fdaee7c6-a2f2-467f-97ff-a2edcea582b4 | int_net1                                           | 65106710-df85-4a9f-b291-08c16d2e3888 |
| ff653c9f-f850-4724-b743-de51fd82bcef | HA network tenant 17354a789a32460eb81e166268b8b310 | 120e418a-d58d-471b-95eb-a4a018d2c181 |
+--------------------------------------+----------------------------------------------------+--------------------------------------+
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# openstack server create --flavor m1.tiny \
> --image cirros2 --key-name testkey --availability-zone nova:compute2:compute2 \
> --nic net-id=cdbdce6-ac26-44f9-9999-2cae549661e9 pro_net4_vlan126_inst1
No Network found for cdbdce6-ac26-44f9-9999-2cae549661e9
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# openstack server create --flavor m1.tiny \
> --image cirros2 --key-name testkey --availability-zone nova:compute2:compute2 \
> --nic net-id=b791752a-4b20-4b4f-ae00-017d7e0b78c5 vlan126_inst1
+-------------------------------------+------------------------------------------------+
| Field                               | Value                                          |
+-------------------------------------+------------------------------------------------+
| OS-DCF:diskConfig                   | MANUAL                                         |
| OS-EXT-AZ:availability_zone         | nova                                           |
| OS-EXT-SRV-ATTR:host                | None                                           |
| OS-EXT-SRV-ATTR:hypervisor_hostname | None                                           |
| OS-EXT-SRV-ATTR:instance_name       |                                                |
| OS-EXT-STS:power_state              | NOSTATE                                        |
| OS-EXT-STS:task_state               | scheduling                                     |
| OS-EXT-STS:vm_state                 | building                                       |
| OS-SRV-USG:launched_at              | None                                           |
| OS-SRV-USG:terminated_at            | None                                           |
| accessIPv4                          |                                                |
| accessIPv6                          |                                                |
| addresses                           |                                                |
| adminPass                           | hFfHKfVb9B8n                                   |
| config_drive                        |                                                |
| created                             | 2019-05-17T12:53:42Z                           |
| flavor                              | m1.tiny (1)                                    |
| hostId                              |                                                |
| id                                  | 8c634de3-d5a5-4a87-b0a9-77f74d8d2fe6           |
| image                               | cirros2 (4606081e-f773-484e-91cb-3f743ac0855d) |
| key_name                            | testkey                                        |
| name                                | vlan126_inst1                                  |
| progress                            | 0                                              |
| project_id                          | 17354a789a32460eb81e166268b8b310               |
| properties                          |                                                |
| security_groups                     | name='default'                                 |
| status                              | BUILD                                          |
| updated                             | 2019-05-17T12:53:42Z                           |
| user_id                             | 86e2872ece4f402790e1f8fd65a2d48f               |
| volumes_attached                    |                                                |
+-------------------------------------+------------------------------------------------+
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# .
-bash: .: filename argument required
.: usage: . filename [arguments]
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# 
[root@controller ~(keystone)]# ssh -i ~/.ssh/id_rsa 192.168.126.101
The authenticity of host '192.168.126.101 (192.168.126.101)' can't be established.
RSA key fingerprint is SHA256:sAvQExs1/j6RIAhdjEd28HLNC4SV7V7p4IaajD6aPLY.
RSA key fingerprint is MD5:98:37:22:ca:5f:ad:37:a6:cf:60:d0:f5:e6:51:57:94.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.126.101' (RSA) to the list of known hosts.
Please login as 'cirros' user, not as root
 
 
^CConnection to 192.168.126.101 closed.
[root@controller ~(keystone)]# ssh -i ~/.ssh/id_rsa cirros@192.168.126.101
$ ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
    link/ether fa:16:3e:bb:23:ac brd ff:ff:ff:ff:ff:ff
    inet 192.168.126.101/24 brd 192.168.126.255 scope global eth0
    inet6 fe80::f816:3eff:febb:23ac/64 scope link 
       valid_lft forever preferred_lft forever
$ ip a
 
< Ping thành công sau khi tạo VM trên KVM >
$ ping 192.168.126.2
PING 192.168.126.2 (192.168.126.2): 56 data bytes
64 bytes from 192.168.126.2: seq=0 ttl=64 time=1.174 ms
64 bytes from 192.168.126.2: seq=1 ttl=64 time=0.616 ms
64 bytes from 192.168.126.2: seq=2 ttl=64 time=0.453 ms
64 bytes from 192.168.126.2: seq=3 ttl=64 time=0.474 ms
^C
--- 192.168.126.2 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 0.453/0.679/1.174 ms 
 
 

c.        Cấu hình KVM

 
[root@kvm_node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eno3 
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=dhcp
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eno3
UUID=dca655f2-e2b0-4853-b79a-a4c10fc0702a
DEVICE=eno3
ONBOOT=yes
 
 
[root@kvm_node1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eno1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master team0 state UP group default qlen 1000
    link/ether 20:67:7c:de:d5:7c brd ff:ff:ff:ff:ff:ff
    inet 10.1.29.10/16 brd 10.1.255.255 scope global eno1
       valid_lft forever preferred_lft forever
3: eno2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master team0 state UP group default qlen 1000
    link/ether 20:67:7c:de:d5:7c brd ff:ff:ff:ff:ff:ff
4: eno3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 20:67:7c:de:d5:7e brd ff:ff:ff:ff:ff:ff
 
 

d.       Cấu hình VLAN cho VM trên KVM



 
[root@vm1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0.126 
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
NAME=eth0.126
UUID=b75aa98c-cbfe-4888-8d5f-14c5cbf7ca7f
DEVICE=eth0.126
ONBOOT=yes
IPADDR=192.168.126.2
NETMASK=255.255.255.0
VLAN=yes
[root@vm1 ~]# 
[root@vm1 ~]# ip -4 a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 10.1.17.100/16 brd 10.1.255.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
3: eth0.126@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    inet 192.168.126.2/24 brd 192.168.126.255 scope global eth0.126
       valid_lft forever preferred_lft forever
[root@vm1 ~]# 
 

B. Test2: Tag VLAN trên sub port của port vật lý

a.       Tham chiếu https://serverfault.com/questions/841177/how-do-i-pass-vlan-traffic-over-a-bridge-to-kvm-vms


Dựa vào link cấu hình Trunk ở trên port eno3  Tạo Port eno3.126 để TAG VLAN. Tạo Bridge br-vlan126 qua port eno3.126
Quá trình học MAC có vấn đề

b.       Tạo port trên host kvm_node1

 
ip link add link eno3 name eno3.126 type vlan id 126
brctl show
brctl addbr br-vlan126
brctl addif br-vlan126 eno3.126
ip link show
 
# một số lệnh xóa 
ip link del eno3.126
ip link show
brctl delbr br-vlan126
 
# có thể thêm IP để test kết nối 
ifconfig eno3.126 192.168.126.98 netmask 255.255.255.0 
ifconfig br-vlan126 192.168.126.99 netmask 255.255.255.0 
ip addr del 192.168.126.98/24 dev eno3.126
ip addr del 192.168.126.99/24 dev br-vlan126
ip addr
 
# có thể thêm vlan111 để test kết nối 
ip link add link eno3 name eno3.111 type vlan id 111
brctl show
brctl addbr br-vlan111
brctl addif br-vlan111 eno3.111
ifconfig eno3.111 10.1.29.98 netmask 255.255.0.0 
ifconfig br-vlan111 10.1.29.99 netmask 255.255.0.0 
ip addr del 10.1.29.98/16 dev eno3.111 <== gây mất mạng vì chung giải IP cần kiểm tra
ip addr del 10.1.29.99/16 dev br-vlan111

c.        Cấu hình bridge trên VM của KVM kvm_node1


Trên VM từ openstack
 
$ ip -4 a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue 
    inet 127.0.0.1/8 scope host lo
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
    inet 192.168.126.101/24 brd 192.168.126.255 scope global eth0
$ ping 192.168.126.3
PING 192.168.126.3 (192.168.126.3): 56 data bytes
64 bytes from 192.168.126.3: seq=0 ttl=64 time=0.516 ms
64 bytes from 192.168.126.3: seq=1 ttl=64 time=1.058 ms
64 bytes from 192.168.126.3: seq=2 ttl=64 time=0.901 ms
^C
--- 192.168.126.3 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
 

3.      VM LiveMigration + Share NFS

C. Test1:

a.       Tham chiếu

Dựa vào link cấu hình Trunk ở trên port Data cho các VM. Tạo Bridge cho KVM. Tag VLAN trên VM.
Tạo VM trên openstack IP 192.168.126.101 VLAN126 và VM trên KVM IP 192.168.126.2

b.       Cấu hình share NFS trên kvm_node1, kvm_node2, sharerepo

 
[root@sharerepo home]# vi /etc/exports
/home *(rw,sync,no_root_squash)
 
[root@sharerepo home]# ls -ld /home/
drwxr-xrwx. 3 root root 145 May 17 20:57 /home/
 
systemctl enable rpcbind
systemctl enable nfs-server
systemctl enable nfs-lock
systemctl enable nfs-idmap
systemctl start rpcbind
systemctl start nfs-server
systemctl start nfs-lock
systemctl start nfs-idmap
 
 
 
kvm_node1 & kvm_node2
mount -t nfs sharerepo:/home /share/
 
[root@sharerepo home]# vi /etc/exports
/home *(rw,sync,no_root_squash)
 
 

c.        Cấu hình share NFS và openssh trên KVM kvm_node1, kvm_node2

sharerepo, kvm_node1, kvm_node2
# yum install openssh-askpass
 

 
 
 

 
 
 

 
 

 
 
 

4.      VM Spice + VNC

D. Test1:

a.       Tham chiếu

Cài đặt spice client trên PC https://virt-manager.org/download/
Cấu hình VM trỏ đến spice server

b.       Cấu hình VM trỏ đến Spice Server


5.      Windows

E. Test1:

a.       Tham chiếu

Cài đặt sử dụng virtio cho disk, ethernet
Tham khảo virtio:
What is virtio?

There are quite a few articles about it but basically it is like the VMWare Tools for KVM. It is a bunch of libraries that basically speed up your guest OS by making it more efficient to communicate with the host.

What speed ups are we talking about? Mainly disk I/O and network. Without Virtio drivers installed your Windows guest will feel like molasses. Or more like the difference between traditional hard drives and SSDs. It will still work but you won't like it.

b.       Cấu hình cài VM





6.      VM LiveMigration + Share iscsi storagepool + Share volume

A. Test1: ISCSI

a.       Tham chiếu

- Trên sharerepo tạo Target iscsi share 2 volume
- Cấu hình tương ứng 2 kvm
- Tạo VM cấu hình: volume sử dụng trực tiếp volume lun0, cache các drive đặt ở chế độ directsync
Mục tiêu: Chứng minh là share iscsi LUN lun0 có thể vẫn đọc ghi bình thường khi migrate VM từ node này qua node khác.

b.       Cấu hình share NFS trên kvm_node1, kvm_node2, sharerepo

 
backstores/fileio/ create shareddata /home/share_vol_1.img 100M
backstores/fileio/ create shareddata_2 /home/share_vol_2.img 200M
iscsi/ create iqn.2014-08.com.example:t1
/iscsi/iqn.20...ample:t1/tpg1> luns/ create /backstores/fileio/shareddata
/iscsi/iqn.20...ample:t1/tpg1> acls/ create iqn.1994-05.com.redhat:6948df66f53
acls/ create iqn.1994-05.com.redhat:c4c65cd3e4e6
 
 
iscsiadm --mode discovery --type sendtargets --portal sharerepo
iscsiadm --mode node --targetname iqn.2014-08.com.example:t1 --portal sharerepo --login
 
[root@kvm_node1 share]# lsblk --scsi
NAME HCTL       TYPE VENDOR   MODEL             REV TRAN
sda  1:1:0:0    disk HPE      LOGICAL VOLUME   1.60 sas
sdb  1:1:0:1    disk HPE      LOGICAL VOLUME   1.60 sas
sdc  1:1:0:2    disk HPE      LOGICAL VOLUME   1.60 sas
sde  4:0:0:0    disk LIO-ORG  shareddata       4.0  iscsi
 
 
iscsiadm -m node -T iqn.2014-08.com.example:t1 --portal sharerepo -u
[root@kvm_node1 share]#  iscsiadm --mode node --targetname iqn.2014-08.com.example:t1 --portal sharerepo --login
Logging in to [iface: default, target: iqn.2014-08.com.example:t1, portal: 10.1.29.9,3260] (multiple)
Login to [iface: default, target: iqn.2014-08.com.example:t1, portal: 10.1.29.9,3260] successful.
[root@kvm_node1 share]# lsblk --scsi
NAME HCTL       TYPE VENDOR   MODEL             REV TRAN
sda  1:1:0:0    disk HPE      LOGICAL VOLUME   1.60 sas
sdb  1:1:0:1    disk HPE      LOGICAL VOLUME   1.60 sas
sdc  1:1:0:2    disk HPE      LOGICAL VOLUME   1.60 sas
sde  5:0:0:0    disk LIO-ORG  shareddata       4.0  iscsi
sdf  5:0:0:1    disk LIO-ORG  shareddata_2     4.0  iscsi
[root@sharerepo home]# ls -ld /home/
drwxr-xrwx. 3 root root 145 May 17 20:57 /home/

c.        Cấu hình iscsi storage pool trên kvm_node1, kvm_node2



d.       Cấu hình VM sử dụng lun0 của iscsi storage pool

Cấu hình: đĩa ở vùng share, cache
.

Cấu hình đĩa ghi trực tiếp lên vùng iscsi, cache directsync

e.       Thực hiện migrate VM và kiểm tra đọc ghi lun0

Cấu hình: đĩa ở vùng share, cache

 
[root@vlan111_vm2 ~]# lsblk --scsi
NAME HCTL       TYPE VENDOR   MODEL             REV TRAN
sda  2:0:0:0    disk LIO-ORG  shareddata       4.0  
sr0  0:0:0:0    rom  QEMU     QEMU DVD-ROM     1.5. ata
[root@vlan111_vm2 ~]# 
[root@vlan111_vm2 ~]# fdisk -l
 
Disk /dev/vda: 5368 MB, 5368709120 bytes, 10485760 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000f285b
 
   Device Boot      Start         End      Blocks   Id  System
/dev/vda1   *        2048     2099199     1048576   83  Linux
/dev/vda2         2099200    10485759     4193280   8e  Linux LVM
 
Disk /dev/sda: 104 MB, 104857600 bytes, 204800 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 4194304 bytes
Disk label type: dos
Disk identifier: 0x64b4cdcd
 
   Device Boot      Start         End      Blocks   Id  System
/dev/sda1            8192      204799       98304   83  Linux
 
Disk /dev/mapper/rhel-root: 3753 MB, 3753902080 bytes, 7331840 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
 
 
Disk /dev/mapper/rhel-swap: 536 MB, 536870912 bytes, 1048576 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
 
[root@vlan111_vm2 ~]# mount /dev/sda1 /mnt
mount: /dev/sda1 is already mounted or /mnt busy
       /dev/sda1 is already mounted on /mnt
[root@vlan111_vm2 ~]# date >>/mnt/time.txt 
[root@vlan111_vm2 ~]# cat /mnt/time.txt 
Sun May 19 06:10:31 EDT 2019
Sun May 19 06:10:32 EDT 2019
Sun May 19 06:10:32 EDT 2019
Sun May 19 06:10:32 EDT 2019
Sun May 19 06:10:33 EDT 2019
Sun May 19 06:11:44 EDT 2019
Sun May 19 06:11:45 EDT 2019
Sun May 19 06:11:45 EDT 2019
Sun May 19 06:22:30 EDT 2019
 
==> Sau khi thực hiện migrate VM đã chuyển node
 
 
[root@vlan111_vm2 ~]# cat /mnt/time.txt 
Sun May 19 06:10:31 EDT 2019
Sun May 19 06:10:32 EDT 2019
Sun May 19 06:10:32 EDT 2019
Sun May 19 06:10:32 EDT 2019
Sun May 19 06:10:33 EDT 2019
Sun May 19 06:11:44 EDT 2019
Sun May 19 06:11:45 EDT 2019
Sun May 19 06:11:45 EDT 2019
Sun May 19 06:22:30 EDT 2019
[root@vlan111_vm2 ~]# date >>/mnt/time.txt 
[root@vlan111_vm2 ~]# cat /mnt/time.txt 
Sun May 19 06:10:31 EDT 2019
Sun May 19 06:10:32 EDT 2019
 
 

7.      Test IO Performance

A. Test1:

a.       Tham chiếu

- Chạy dynamo trên VM và iometer trên PC
- Tham chiếu về IO Specs

b.       Cấu hình Specs IO và clients

[root@vlan111_vm2 ~]# /home/dynamo  -i 10.4.0.60 -m 10.1.29.34
Dynamo version 1.1.0, x86-64, built Mar 25 2014 22:08:27
 
 
 
Command line parameter(s):
   Looking for Iometer on "10.4.0.60"
Sending login request...
   vlan111_vm2
   10.1.29.34 (port 59623)
Successful PortTCP::Connect
  - port name: 10.4.0.60
 
*** If dynamo and iometer hangs here, please make sure
*** you use a correct -m <manager_computer_name> that
*** can ping from iometer machine. use IP if need.
   Login accepted.
Reporting drive information...
- Tạo VM cấu hình: volume sử dụng trực tiếp volume lun0, cache các drive đặt ở chế độ directsync 

direct IO



virtual IO

B. Test2: SCSI

a.       Tham chiếu

b.       Cấu hình LUN cho VM

 
[root@kvm_node1 ~]# fdisk -l |grep sd
WARNING: fdisk GPT support is currently new, and therefore in an experimental phase. Use at your own discretion.
Disk /dev/sdb: 2000.4 GB, 2000365379584 bytes, 3906963632 sectors
/dev/sdb1            2048  3906963631  1953480792   83  Linux
Disk /dev/sda: 480.1 GB, 480070426624 bytes, 937637552 sectors
Disk /dev/sdc: 2000.4 GB, 2000365379584 bytes, 3906963632 sectors
Disk /dev/sde: 107.4 GB, 107374182400 bytes, 209715200 sectors
Disk /dev/sdf: 214.7 GB, 214748364800 bytes, 419430400 sectors
Disk /dev/sdh: 104 MB, 104857600 bytes, 204800 sectors
/dev/sdh1            8192      204799       98304   83  Linux
 

c.        Test IO LUN cho VM

 
 
[root@vlan111_vm2 ~]# dd if=/dev/zero of=/mnt/100gb bs=1MB count=1000
1000+0 records in
1000+0 records out
1000000000 bytes (1.0 GB) copied, 2.19268 s, 456 MB/s

 
 
 
 

 
 
 
[root@vlan111_vm2 ~]# dd if=/dev/zero of=/virtualio/100gb bs=1MB count=1000
1000+0 records in
1000+0 records out
1000000000 bytes (1.0 GB) copied, 1.64655 s, 607 MB/s
[root@vlan111_vm2 ~]# dd if=/dev/zero of=/directio/100gb bs=1MB count=1000
1000+0 records in
1000+0 records out
1000000000 bytes (1.0 GB) copied, 2.65229 s, 377 MB/s
[root@vlan111_vm2 ~]# 
[root@vlan111_vm2 ~]# 
[root@vlan111_vm2 ~]# dd if=/dev/zero of=/directio/100gb bs=512KB count=1000
1000+0 records in
1000+0 records out
512000000 bytes (512 MB) copied, 1.01281 s, 506 MB/s
[root@vlan111_vm2 ~]# dd if=/dev/zero of=/virtualio/100gb bs=512KB count=1000
1000+0 records in
1000+0 records out
512000000 bytes (512 MB) copied, 0.769806 s, 665 MB/s
[root@vlan111_vm2 ~]# 
[root@vlan111_vm2 ~]# 
[root@vlan111_vm2 ~]# dd if=/dev/zero of=/virtualio/100gb bs=256KB count=1000
1000+0 records in
1000+0 records out
256000000 bytes (256 MB) copied, 1.58181 s, 162 MB/s
[root@vlan111_vm2 ~]# dd if=/dev/zero of=/directio/100gb bs=256KB count=1000
1000+0 records in
1000+0 records out
256000000 bytes (256 MB) copied, 0.358225 s, 715 MB/s
[root@vlan111_vm2 ~]# dd if=/dev/zero of=/directio/100gb bs=128KB count=1000
1000+0 records in
1000+0 records out
128000000 bytes (128 MB) copied, 0.0879717 s, 1.5 GB/s
[root@vlan111_vm2 ~]# dd if=/dev/zero of=/virtualio/100gb bs=128KB count=1000
1000+0 records in
1000+0 records out
128000000 bytes (128 MB) copied, 0.106413 s, 1.2 GB/s
 
=> Với blocksize nhỏ thì direct IO cao hơn
=> Với blocksize lớn thì ảo hóa IO cao hơn



=======

 Some configuration files

ifcfg-bond0
DEVICE=bond0
TYPE=Ethernet
IPADDR=10.38.22.11
PREFIX=25
GATEWAY=10.38.22.1
ONBOOT=yes
BOOTPROTO=none
USERCTL=no
NM_CONTROLLED=no
BONDING_OPTS="mode=802.3ad miimon=100 lacp_rate=fast"

ifcfg-bond1
DEVICE=bond1
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=none
USERCTL=no
NM_CONTROLLED=no
BONDING_OPTS="mode=802.3ad miimon=100 lacp_rate=fast"


ifcfg-bond2
DEVICE=bond2
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=none
USERCTL=no
NM_CONTROLLED=no
IPADDR=192.168.100.3
PREFIX=24
BONDING_OPTS="mode=802.3ad miimon=100 lacp_rate=fast"

==========

ifcfg-bond1.423
DEVICE=bond1.423
VLAN=yes
ONBOOT="yes"
BOOTPROTO=none
BRIDGE=br-vlan423
NM_CONTROLLED=no

ifcfg-bond1.424

ifcfg-bond1.426
ifcfg-bond1.427

===========

ifcfg-br-vlan423
DEVICE=br-vlan423
TYPE=Bridge
BOOTPROTO=none
ONBOOT=yes
DELAY=0

ifcfg-br-vlan424
ifcfg-br-vlan426
ifcfg-br-vlan427

==========

ifcfg-eno1
DEVICE=eno1
ONBOOT=yes
BOOTPROTO=none
USERCTL=no
MASTER=bond0
SLAVE=yes

ifcfg-eno2
DEVICE=eno2
ONBOOT=yes
BOOTPROTO=none
USERCTL=no
MASTER=bond0
SLAVE=yes

ifcfg-eno49
ifcfg-eno50
ifcfg-ens1f0
ifcfg-ens1f1