#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Setup/topology:
#
#    NS1             NS2             NS3
#    veth1 <---> veth2   veth3 <---> veth4 (the top route)
#    veth5 <---> veth6   veth7 <---> veth8 (the bottom route)
#
#    each vethN gets the IPv[4|6]_N address
#
#    IPv*_SRC = IPv*_1
#    IPv*_DST = IPv*_4
#
#    all tests ping IPv*_DST from IPv*_SRC
#
#    by default, routes are configured to allow packets to go
#    IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
#
#    a GRE device is installed in NS3 with IPv*_GRE, and
#    NS1/NS2 are configured to route packets to IPv*_GRE via the
#    bottom route (NS1 via IP*_6, NS2 via IP*_8)
#
# Tests:
#
#    1. routes NS2->IPv*_DST are brought down, so the only way a ping
#       from IP*_SRC to IP*_DST can work is via IPv*_GRE
#
#    2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
#        that encaps the packets with an IP/GRE header to route to IPv*_GRE
#
#        ping: SRC->[encap at veth1:egress]->GRE:decap->DST
#        ping replies go DST->SRC directly
#
#    2b. in an ingress test, a bpf LWT_IN program is installed on veth2
#        that encaps the packets with an IP/GRE header to route to IPv*_GRE
#
#        ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
#        ping replies go DST->SRC directly
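#
# For reference, the LWT/eBPF replacement routes installed by the tests below
# use iproute2's "encap bpf" lwtunnel syntax, roughly:
#
#    ip route add <IP*_DST> encap bpf xmit obj test_lwt_ip_encap.o \
#            sec encap_gre dev veth1
#
# so the script expects test_lwt_ip_encap.o (with its encap_gre and encap_gre6
# sections) in the current working directory, and an iproute2 with BPF
# lwtunnel encap support.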

if [[ $EUID -ne 0 ]]; then
	echo "This script must be run as root"
	echo "FAIL"
	exit 1
fi

readonly NS1="ns1-$(mktemp -u XXXXXX)"
readonly NS2="ns2-$(mktemp -u XXXXXX)"
readonly NS3="ns3-$(mktemp -u XXXXXX)"

readonly IPv4_1="172.16.1.100"
readonly IPv4_2="172.16.2.100"
readonly IPv4_3="172.16.3.100"
readonly IPv4_4="172.16.4.100"
readonly IPv4_5="172.16.5.100"
readonly IPv4_6="172.16.6.100"
readonly IPv4_7="172.16.7.100"
readonly IPv4_8="172.16.8.100"
readonly IPv4_GRE="172.16.16.100"

readonly IPv4_SRC=$IPv4_1
readonly IPv4_DST=$IPv4_4

readonly IPv6_1="fb01::1"
readonly IPv6_2="fb02::1"
readonly IPv6_3="fb03::1"
readonly IPv6_4="fb04::1"
readonly IPv6_5="fb05::1"
readonly IPv6_6="fb06::1"
readonly IPv6_7="fb07::1"
readonly IPv6_8="fb08::1"
readonly IPv6_GRE="fb10::1"

readonly IPv6_SRC=$IPv6_1
readonly IPv6_DST=$IPv6_4

TEST_STATUS=0
TESTS_SUCCEEDED=0
TESTS_FAILED=0

TMPFILE=""

process_test_results()
{
	if [[ "${TEST_STATUS}" -eq 0 ]] ; then
		echo "PASS"
		TESTS_SUCCEEDED=$((TESTS_SUCCEEDED+1))
	else
		echo "FAIL"
		TESTS_FAILED=$((TESTS_FAILED+1))
	fi
}

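# On a fully passing run of the 8 tests below ({egress,ingress} x {IPv4,IPv6}
# encap, with and without VRF), the summary printed by this function reads:
#
#    passed tests: 8
#    failed tests: 0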
print_test_summary_and_exit()
{
	echo "passed tests: ${TESTS_SUCCEEDED}"
	echo "failed tests: ${TESTS_FAILED}"
	if [ "${TESTS_FAILED}" -eq "0" ] ; then
		exit 0
	else
		exit 1
	fi
}

setup()
{
	set -e # exit on error
	TEST_STATUS=0

	# create devices and namespaces
	ip netns add "${NS1}"
	ip netns add "${NS2}"
	ip netns add "${NS3}"

	ip link add veth1 type veth peer name veth2
	ip link add veth3 type veth peer name veth4
	ip link add veth5 type veth peer name veth6
	ip link add veth7 type veth peer name veth8

	ip netns exec ${NS2} sysctl -wq net.ipv4.ip_forward=1
	ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.forwarding=1

	ip link set veth1 netns ${NS1}
	ip link set veth2 netns ${NS2}
	ip link set veth3 netns ${NS2}
	ip link set veth4 netns ${NS3}
	ip link set veth5 netns ${NS1}
	ip link set veth6 netns ${NS2}
	ip link set veth7 netns ${NS2}
	ip link set veth8 netns ${NS3}

	if [ ! -z "${VRF}" ] ; then
		ip -netns ${NS1} link add red type vrf table 1001
		ip -netns ${NS1} link set red up
		ip -netns ${NS1} route add table 1001 unreachable default metric 8192
		ip -netns ${NS1} -6 route add table 1001 unreachable default metric 8192
		ip -netns ${NS1} link set veth1 vrf red
		ip -netns ${NS1} link set veth5 vrf red

		ip -netns ${NS2} link add red type vrf table 1001
		ip -netns ${NS2} link set red up
		ip -netns ${NS2} route add table 1001 unreachable default metric 8192
		ip -netns ${NS2} -6 route add table 1001 unreachable default metric 8192
		ip -netns ${NS2} link set veth2 vrf red
		ip -netns ${NS2} link set veth3 vrf red
		ip -netns ${NS2} link set veth6 vrf red
		ip -netns ${NS2} link set veth7 vrf red
	fi

	# configure addresses: the top route (1-2-3-4)
	ip -netns ${NS1} addr add ${IPv4_1}/24 dev veth1
	ip -netns ${NS2} addr add ${IPv4_2}/24 dev veth2
	ip -netns ${NS2} addr add ${IPv4_3}/24 dev veth3
	ip -netns ${NS3} addr add ${IPv4_4}/24 dev veth4
	ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1
	ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2
	ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3
	ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4

	# configure addresses: the bottom route (5-6-7-8)
	ip -netns ${NS1} addr add ${IPv4_5}/24 dev veth5
	ip -netns ${NS2} addr add ${IPv4_6}/24 dev veth6
	ip -netns ${NS2} addr add ${IPv4_7}/24 dev veth7
	ip -netns ${NS3} addr add ${IPv4_8}/24 dev veth8
	ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5
	ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6
	ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
	ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8

	ip -netns ${NS1} link set dev veth1 up
	ip -netns ${NS2} link set dev veth2 up
	ip -netns ${NS2} link set dev veth3 up
	ip -netns ${NS3} link set dev veth4 up
	ip -netns ${NS1} link set dev veth5 up
	ip -netns ${NS2} link set dev veth6 up
	ip -netns ${NS2} link set dev veth7 up
	ip -netns ${NS3} link set dev veth8 up

	# configure routes: the top route (via veth1/IP*_2) is the default;
	# the bottom route covers only specific bottom addresses
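	# (${VRF} in the commands below is empty on the default runs and
	# "vrf red" on the VRF runs, which puts these routes into table 1001)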

	# NS1
	# top route
	ip -netns ${NS1} route add ${IPv4_2}/32 dev veth1 ${VRF}
	ip -netns ${NS1} route add default dev veth1 via ${IPv4_2} ${VRF} # go top by default
	ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1 ${VRF}
	ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2} ${VRF} # go top by default
	# bottom route
	ip -netns ${NS1} route add ${IPv4_6}/32 dev veth5 ${VRF}
	ip -netns ${NS1} route add ${IPv4_7}/32 dev veth5 via ${IPv4_6} ${VRF}
	ip -netns ${NS1} route add ${IPv4_8}/32 dev veth5 via ${IPv4_6} ${VRF}
	ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5 ${VRF}
	ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6} ${VRF}
	ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6} ${VRF}

	# NS2
	# top route
	ip -netns ${NS2} route add ${IPv4_1}/32 dev veth2 ${VRF}
	ip -netns ${NS2} route add ${IPv4_4}/32 dev veth3 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3 ${VRF}
	# bottom route
	ip -netns ${NS2} route add ${IPv4_5}/32 dev veth6 ${VRF}
	ip -netns ${NS2} route add ${IPv4_8}/32 dev veth7 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7 ${VRF}

	# NS3
	# top route
	ip -netns ${NS3} route add ${IPv4_3}/32 dev veth4
	ip -netns ${NS3} route add ${IPv4_1}/32 dev veth4 via ${IPv4_3}
	ip -netns ${NS3} route add ${IPv4_2}/32 dev veth4 via ${IPv4_3}
	ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
	ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
	ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
	# bottom route
	ip -netns ${NS3} route add ${IPv4_7}/32 dev veth8
	ip -netns ${NS3} route add ${IPv4_5}/32 dev veth8 via ${IPv4_7}
	ip -netns ${NS3} route add ${IPv4_6}/32 dev veth8 via ${IPv4_7}
	ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
	ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
	ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}

	# configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
	ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
	ip -netns ${NS3} link set gre_dev up
	ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
	ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6} ${VRF}
	ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8} ${VRF}

	# configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
	ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
	ip -netns ${NS3} link set gre6_dev up
	ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev
	ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}

	# rp_filter gets confused by what these tests are doing, so disable it
	ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0

	TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)

	sleep 1 # reduce flakiness
	set +e
}
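# For manual debugging, the topology built by setup() can be inspected with
# standard iproute2 commands before cleanup() deletes the namespaces, e.g.:
#
#    ip netns list
#    ip -netns <ns1 name> route show [vrf red]
#    ip -netns <ns3 name> -6 route show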

cleanup()
{
	if [ -f "${TMPFILE}" ] ; then
		rm "${TMPFILE}"
	fi

	ip netns del ${NS1} 2> /dev/null
	ip netns del ${NS2} 2> /dev/null
	ip netns del ${NS3} 2> /dev/null
}

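# cleanup runs on any exit, including a failure during setup() (which runs
# under "set -e"), so a partially built topology is removed as well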
trap cleanup EXIT

remove_routes_to_gredev()
{
	ip -netns ${NS1} route del ${IPv4_GRE} dev veth5 ${VRF}
	ip -netns ${NS2} route del ${IPv4_GRE} dev veth7 ${VRF}
	ip -netns ${NS1} -6 route del ${IPv6_GRE}/128 dev veth5 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_GRE}/128 dev veth7 ${VRF}
}

add_unreachable_routes_to_gredev()
{
	ip -netns ${NS1} route add unreachable ${IPv4_GRE}/32 ${VRF}
	ip -netns ${NS2} route add unreachable ${IPv4_GRE}/32 ${VRF}
	ip -netns ${NS1} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
	ip -netns ${NS2} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
}

test_ping()
{
	local -r PROTO=$1
	local -r EXPECTED=$2
	local RET=0

	if [ "${PROTO}" == "IPv4" ] ; then
		ip netns exec ${NS1} ping -c 1 -W 1 -I veth1 ${IPv4_DST} > /dev/null 2>&1
		RET=$?
	elif [ "${PROTO}" == "IPv6" ] ; then
		ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} > /dev/null 2>&1
		RET=$?
	else
		echo " test_ping: unknown PROTO: ${PROTO}"
		TEST_STATUS=1
	fi

	if [ "0" != "${RET}" ]; then
		RET=1
	fi

	if [ "${EXPECTED}" != "${RET}" ] ; then
		echo " test_ping failed: expected: ${EXPECTED}; got ${RET}"
		TEST_STATUS=1
	fi
}

test_gso()
{
	local -r PROTO=$1
	local -r PKT_SZ=5000
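	# PKT_SZ is chosen to be larger than the default 1500-byte veth MTU, so
	# the TCP transfer below exercises GSO/segmentation through the encap route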
	local IP_DST=""
	: > ${TMPFILE} # trim the capture file

	# check that nc is present
	command -v nc >/dev/null 2>&1 || \
		{ echo >&2 "nc is not available: skipping GSO tests"; return; }

	# listen on port 9000, capture TCP into $TMPFILE
	if [ "${PROTO}" == "IPv4" ] ; then
		IP_DST=${IPv4_DST}
		ip netns exec ${NS3} bash -c \
			"nc -4 -l -p 9000 > ${TMPFILE} &"
	elif [ "${PROTO}" == "IPv6" ] ; then
		IP_DST=${IPv6_DST}
		ip netns exec ${NS3} bash -c \
			"nc -6 -l -p 9000 > ${TMPFILE} &"
	else
		echo " test_gso: unknown PROTO: ${PROTO}"
		TEST_STATUS=1
	fi
	sleep 1 # let nc start listening

	# send a packet larger than MTU
	ip netns exec ${NS1} bash -c \
		"dd if=/dev/zero bs=$PKT_SZ count=1 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
	sleep 2 # let the packet get delivered

	# verify we received all expected bytes
	SZ=$(stat -c %s ${TMPFILE})
	if [ "$SZ" != "$PKT_SZ" ] ; then
		echo " test_gso failed: ${PROTO}"
		TEST_STATUS=1
	fi
}

test_egress()
{
	local -r ENCAP=$1
	echo "starting egress ${ENCAP} encap test ${VRF}"
	setup

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, ping fails
	ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
	else
		echo " unknown encap ${ENCAP}"
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# skip GSO tests with VRF: VRF routing needs properly assigned
	# source IP/device, which is easy to do with ping and hard with dd/nc.
	if [ -z "${VRF}" ] ; then
		test_gso IPv4
		test_gso IPv6
	fi

	# a negative test: remove routes to GRE devices: ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	cleanup
	process_test_results
}

test_ingress()
{
	local -r ENCAP=$1
	echo "starting ingress ${ENCAP} encap test ${VRF}"
	setup

	# need to wait a bit for IPv6 to autoconf, otherwise
	# ping6 sometimes fails with "unable to bind to address"

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, pings fail
	ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
	else
		echo "FAIL: unknown encap ${ENCAP}"
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# a negative test: remove routes to GRE devices: ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	cleanup
	process_test_results
}

VRF=""
test_egress IPv4
test_egress IPv6
test_ingress IPv4
test_ingress IPv6

VRF="vrf red"
test_egress IPv4
test_egress IPv6
test_ingress IPv4
test_ingress IPv6

print_test_summary_and_exit