- What the test cases mean
– Host-DPDK is the upper bound of all test cases on a fixed HW configuration
– SR-IOV-DPDK-VPP is the target configuration for our purpose
Note: remember to update srcMAC and dstMAC in the pktgen script when switching between test cases.
- Host-Base
# route packets using host-kernel IP forwarding (iptables)
./perf/myScript/setARP.sh
cat setARP.sh
#!/bin/bash
# static ARP entries for the traffic generator's ports, one per test subnet
arp -s 192.168.1.2 50:6b:4b:b6:df:b2
arp -s 192.168.2.2 50:6b:4b:b6:df:b3
arp -s 192.168.3.2 50:6b:4b:b6:dc:5a
arp -s 192.168.4.2 50:6b:4b:b6:dc:5b
arp -s 192.168.5.2 50:6b:4b:b6:df:3a
arp -s 192.168.6.2 50:6b:4b:b6:df:3b
arp -s 192.168.7.2 50:6b:4b:b6:dc:4e
arp -s 192.168.8.2 50:6b:4b:b6:dc:4f
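# Host-Base forwards in the kernel, so IPv4 forwarding must be enabled;
# a quick sketch (not persistent across reboots, adjust for your distro):
sysctl -w net.ipv4.ip_forward=1
sysctl net.ipv4.ip_forward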
- Host-DPDK
# route packets with the DPDK l3fwd app running on the host
vi /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="isolcpus=1,5,9,13,17,21,25,29,2,6,10,14,18,22,26,30 nohz_full=1,5,9,13,17,21,25,29,2,6,10,14,18,22,26,30 rcu_nocbs=1,5,9,13,17,21,25,29,2,6,10,14,18,22,26,30 iommu=pt intel_iommu=on"
update-grub
reboot
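# after the reboot, sanity-check that the isolation parameters took effect
# (the second file exists on recent kernels):
cat /proc/cmdline
cat /sys/devices/system/cpu/isolated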
cd perf/myScript
./all.sh
cd /usr/local/share/dpdk/examples/l3fwd
vi l3fwd_lpm.c
/* {destination network, prefix length, output port} */
static struct ipv4_l3fwd_lpm_route ipv4_l3fwd_lpm_route_array[] = {
	{IPv4(1, 1, 1, 0), 24, 0},
	{IPv4(2, 1, 1, 0), 24, 1},
	{IPv4(3, 1, 1, 0), 24, 2},
	{IPv4(4, 1, 1, 0), 24, 3},
	{IPv4(5, 1, 1, 0), 24, 4},
	{IPv4(6, 1, 1, 0), 24, 5},
	{IPv4(7, 1, 1, 0), 24, 6},
	{IPv4(8, 1, 1, 0), 24, 7},
	{IPv4(192, 168, 1, 0), 24, 0},
	{IPv4(192, 168, 2, 0), 24, 1},
	{IPv4(192, 168, 3, 0), 24, 2},
	{IPv4(192, 168, 4, 0), 24, 3},
	{IPv4(192, 168, 5, 0), 24, 4},
	{IPv4(192, 168, 6, 0), 24, 5},
	{IPv4(192, 168, 7, 0), 24, 6},
	{IPv4(192, 168, 8, 0), 24, 7},
};
make
cd build
# each --config tuple is (port,queue,lcore): both RX queues of every port get a dedicated isolated lcore
./l3fwd -l 1-2,5-6,9-10,13-14,17-18,21-22,25-26,29-30 -n 4 -- -p 0xff --config="(0,0,1),(0,1,17),(1,0,5),(1,1,21),(2,0,9),(2,1,25),(3,0,13),(3,1,29),(4,0,2),(4,1,18),(5,0,6),(5,1,22),(6,0,10),(6,1,26),(7,0,14),(7,1,30)" -L
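# if l3fwd aborts with an EAL memory error, check that hugepages are mounted
# and available (all.sh is expected to have set them up):
grep -i huge /proc/meminfo
mount | grep hugetlbfs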
- Linux-Bridge
# route packets using guest-kernel IP forwarding in a VM attached to the host's Linux bridge
vi /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="default_hugepagesz=1G hugepagesz=1G hugepages=64"
update-grub
reboot
./perf/myScript/prepare-linux-bridge-vm.sh
virsh start linux-bridge
# login to VM
./setARP.sh
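# back on the host, the wiring can be double-checked (assuming
# prepare-linux-bridge-vm.sh creates standard Linux bridges for the VM NICs):
brctl show
virsh domiflist linux-bridge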
- OVS-DPDK
# route packets using guest-kernel IP forwarding in a VM attached to the host's OVS-DPDK bridge
mkdir -p /usr/etc/openvswitch
mkdir -p /var/run/openvswitch
rm /usr/etc/openvswitch/conf.db
ovsdb-tool create /usr/etc/openvswitch/conf.db \
/usr/share/openvswitch/vswitch.ovsschema
ovsdb-server --remote=punix:/var/run/openvswitch/db.sock \
--remote=db:Open_vSwitch,Open_vSwitch,manager_options \
--pidfile --detach --log-file
ovs-vsctl --no-wait init
ovs-vswitchd unix:/var/run/openvswitch/db.sock --pidfile --detach --log-file
ovs-vsctl set Open_vSwitch . "other_config:dpdk-init=true"
ovs-vsctl set Open_vSwitch . "other_config:pmd-cpu-mask=0x66666666"
ovs-vsctl set Open_vSwitch . "other_config:dpdk-socket-mem=2048,2048,2048,2048"
#ovs-vsctl set Open_vSwitch . "other_config:dpdk-extra=--vhost-owner libvirt-qemu:kvm --vhost-perm 0666"
ovs-vsctl get Open_vSwitch . other_config
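# the dpdkvhostuser ports chowned below have to be created on a netdev bridge
# first; a minimal sketch (bridge/port names are placeholders, repeat per VM NIC):
ovs-vsctl add-br br0 -- set bridge br0 datapath_type=netdev
ovs-vsctl add-port br0 dpdkvhostuser0 -- set Interface dpdkvhostuser0 type=dpdkvhostuser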
chown libvirt-qemu:kvm /var/run/openvswitch/dpdkvhostuser*
chmod 0666 /var/run/openvswitch/dpdkvhostuser*
# login to VM
service irqbalance status
./setARP.sh
- SR-IOV
# route packets using guest-kernel IP forwarding in a VM attached to VFs
vi /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="default_hugepagesz=1G hugepagesz=1G hugepages=64 iommu=pt intel_iommu=on"
update-grub
reboot
ip addr flush enp65s0f0
ip addr flush enp65s0f1
ip addr flush enp67s0f0
ip addr flush enp67s0f1
ip addr flush enp129s0f0
ip addr flush enp129s0f1
ip addr flush enp131s0f0
ip addr flush enp131s0f1
perf/myScript/listvfs-by-pf.sh
cat /sys/class/net/enp65s0f0/device/sriov_totalvfs
cat /sys/class/net/enp65s0f0/device/sriov_numvfs
# create 2 VFs per PF; the VF netdevs (enp65s0f2, enp65s0f4, ...) only appear after this
echo 2 > /sys/class/net/enp65s0f0/device/sriov_numvfs
echo 2 > /sys/class/net/enp65s0f1/device/sriov_numvfs
echo 2 > /sys/class/net/enp67s0f0/device/sriov_numvfs
echo 2 > /sys/class/net/enp67s0f1/device/sriov_numvfs
echo 2 > /sys/class/net/enp129s0f0/device/sriov_numvfs
echo 2 > /sys/class/net/enp129s0f1/device/sriov_numvfs
echo 2 > /sys/class/net/enp131s0f0/device/sriov_numvfs
echo 2 > /sys/class/net/enp131s0f1/device/sriov_numvfs
ifconfig enp65s0f2 up
ifconfig enp65s0f4 up
ifconfig enp67s0f2 up
ifconfig enp67s0f4 up
ifconfig enp129s0f2 up
ifconfig enp129s0f4 up
ifconfig enp131s0f2 up
ifconfig enp131s0f4 up
# teardown only: write 0 to remove the VFs again after testing
echo 0 > /sys/class/net/enp65s0f0/device/sriov_numvfs
echo 0 > /sys/class/net/enp65s0f1/device/sriov_numvfs
echo 0 > /sys/class/net/enp67s0f0/device/sriov_numvfs
echo 0 > /sys/class/net/enp67s0f1/device/sriov_numvfs
echo 0 > /sys/class/net/enp129s0f0/device/sriov_numvfs
echo 0 > /sys/class/net/enp129s0f1/device/sriov_numvfs
echo 0 > /sys/class/net/enp131s0f0/device/sriov_numvfs
echo 0 > /sys/class/net/enp131s0f1/device/sriov_numvfs
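# verify the VFs came up (each PF should list two VFs):
lspci | grep -i "virtual function"
ip link show enp65s0f0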
cd ~/perf/myScript/net-def
virsh
net-define sr-iov-net1.xml
net-define sr-iov-net2.xml
net-define sr-iov-net3.xml
net-define sr-iov-net4.xml
net-define sr-iov-net5.xml
net-define sr-iov-net6.xml
net-define sr-iov-net7.xml
net-define sr-iov-net8.xml
net-start sr-iov-net1
net-start sr-iov-net2
net-start sr-iov-net3
net-start sr-iov-net4
net-start sr-iov-net5
net-start sr-iov-net6
net-start sr-iov-net7
net-start sr-iov-net8
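# the net-def XMLs are not reproduced here; a minimal sketch of what
# sr-iov-net1.xml likely contains (a libvirt hostdev pool over one PF;
# the PF device name is an assumption):
<network>
  <name>sr-iov-net1</name>
  <forward mode='hostdev' managed='yes'>
    <pf dev='enp65s0f0'/>
  </forward>
</network>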
# login to VM
./setARP.sh
./parallel-softirq.sh
cat parallel-softirq.sh
#!/bin/bash
# spread mlx5 IRQs across CPUs and steer RPS/XPS so each NUMA node's ports stay on its own cores
service irqbalance stop
# fan the mlx5 completion-queue IRQs out across CPUs 0-15
for irq in `grep 'mlx5_comp' /proc/interrupts | cut -d: -f1`; do \
echo ffff > /proc/irq/$irq/smp_affinity; \
done
# per-queue RPS/XPS: eth1-eth4 to CPUs 0-7, eth5-eth8 to CPUs 8-15
# (queue indices are zero-based, so iterate 0..7 rather than 1..8)
for i in `seq 0 7`; do
echo 00ff > /sys/class/net/eth1/queues/rx-$i/rps_cpus;
echo 00ff > /sys/class/net/eth2/queues/rx-$i/rps_cpus;
echo 00ff > /sys/class/net/eth3/queues/rx-$i/rps_cpus;
echo 00ff > /sys/class/net/eth4/queues/rx-$i/rps_cpus;
echo ff00 > /sys/class/net/eth5/queues/rx-$i/rps_cpus;
echo ff00 > /sys/class/net/eth6/queues/rx-$i/rps_cpus;
echo ff00 > /sys/class/net/eth7/queues/rx-$i/rps_cpus;
echo ff00 > /sys/class/net/eth8/queues/rx-$i/rps_cpus;
echo 00ff > /sys/class/net/eth1/queues/tx-$i/xps_cpus;
echo 00ff > /sys/class/net/eth2/queues/tx-$i/xps_cpus;
echo 00ff > /sys/class/net/eth3/queues/tx-$i/xps_cpus;
echo 00ff > /sys/class/net/eth4/queues/tx-$i/xps_cpus;
echo ff00 > /sys/class/net/eth5/queues/tx-$i/xps_cpus;
echo ff00 > /sys/class/net/eth6/queues/tx-$i/xps_cpus;
echo ff00 > /sys/class/net/eth7/queues/tx-$i/xps_cpus;
echo ff00 > /sys/class/net/eth8/queues/tx-$i/xps_cpus;
done
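# with traffic flowing, confirm the load actually spreads across CPUs:
watch -n1 "grep NET_RX /proc/softirqs"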
- SR-IOV-DPDK
# route packets with the DPDK l3fwd app in a VM attached to VFs
cd /usr/local/share/dpdk/examples/l3fwd
make
cd build
# coremask 0xff0ff0 enables lcores 4-11 and 16-23, matching the lcores used in --config
./l3fwd -c 0xff0ff0 -n 4 -- -p 0xff --config="(0,0,4),(0,1,5),(1,0,6),(1,1,7),(2,0,8),(2,1,9),(3,0,10),(3,1,11),(4,0,16),(4,1,17),(5,0,18),(5,1,19),(6,0,20),(6,1,21),(7,0,22),(7,1,23)" -L
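# the same ipv4_l3fwd_lpm_route_array edit as in the Host-DPDK case applies here;
# the guest also needs hugepages for DPDK, so check before launching:
grep -i hugepages /proc/meminfo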
- SR-IOV-DPDK-VPP
# route packets with VPP (over DPDK) in a VM attached to VFs
# host-side
virsh net-destroy sr-iov-net1
virsh net-destroy sr-iov-net2
virsh net-destroy sr-iov-net3
virsh net-destroy sr-iov-net4
virsh net-destroy sr-iov-net5
virsh net-destroy sr-iov-net6
virsh net-destroy sr-iov-net7
virsh net-destroy sr-iov-net8
virsh net-undefine sr-iov-net1
virsh net-undefine sr-iov-net2
virsh net-undefine sr-iov-net3
virsh net-undefine sr-iov-net4
virsh net-undefine sr-iov-net5
virsh net-undefine sr-iov-net6
virsh net-undefine sr-iov-net7
virsh net-undefine sr-iov-net8
wget https://releases.hashicorp.com/vagrant/2.1.1/vagrant_2.1.1_x86_64.deb
dpkg -i ./vagrant_2.1.1_x86_64.deb
apt install libvirt-dev
vagrant plugin install vagrant-libvirt
# pkg-config libvirt or dpkg-reconfigure libvirt might be needed
git clone https://gerrit.fd.io/r/vpp
cd vpp/build-root/vagrant
# - read the Vagrantfile and the embedded scripts: update.sh, build.sh, install.sh
# - change the source image in the Vagrantfile to a libvirt box instead of the default VirtualBox one:
# config.vm.box = "generic/ubuntu1604"
# - add the line below to update.sh:
# apt-get install -y make gcc
# - configure env.sh as you wish and launch the VM
# - in the Vagrantfile, point storage at an existing pool:
# lv.storage_pool_name = "images"
# (list available pools with: virsh pool-list)
vi env.sh
source ./env.sh
vagrant up --provider=libvirt
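# once the box is up, enter the guest with:
vagrant ssh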
# guest-side
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash console=tty1 console=ttyS0 net.ifnames=0 default_hugepagesz=1G hugepagesz=1G hugepages=8"
cd /vpp
# enable the mlx5 PMD in the VPP build
sed -i '/vpp_uses_dpdk_mlx5_pmd/s/^# //g' build-data/platforms/vpp.mk
# add the following extern declarations to ~/vppsb/router/router/tap_inject_netlink.c:
extern int vnet_arp_set_ip4_over_ethernet (vnet_main_t * vnm,
                                           u32 sw_if_index,
                                           void *a_arg,
                                           int is_static,
                                           int is_no_fib_entry);
extern int vnet_arp_unset_ip4_over_ethernet (vnet_main_t * vnm,
                                             u32 sw_if_index, void *a_arg);
./setARP.sh
# copy the mlx5 glue shared library into the LD library path;
# VPP built with the DPDK mlx5 PMD needs it resolvable at runtime
cp x86_64-native-linuxapp-gcc/lib/librte_pmd_mlx5_glue* /usr/local/lib/
vi .bashrc
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
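# once VPP is built and running, confirm it picked up the VF ports:
vppctl show hardware-interfaces
vppctl show interface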