/
vr_nexthop.c
2316 lines (1926 loc) · 64.2 KB
/
vr_nexthop.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* vr_nexthop.c -- data path nexthop management
*
* Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
*/
#include <vr_os.h>
#include "vr_message.h"
#include "vr_sandesh.h"
#include "vr_mcast.h"
#include "vr_bridge.h"
static int nh_discard(unsigned short, struct vr_packet *,
struct vr_nexthop *, struct vr_forwarding_md *);
extern unsigned int vr_forward(struct vrouter *, unsigned short,
struct vr_packet *, struct vr_forwarding_md *);
extern void vr_init_forwarding_md(struct vr_forwarding_md *);
extern bool vr_has_to_fragment(struct vr_interface *, struct vr_packet *,
unsigned int);
struct vr_nexthop *vr_inet_src_lookup(unsigned short, struct vr_ip *,
struct vr_packet *);
extern struct vr_vrf_stats *(*vr_inet_vrf_stats)(unsigned short, unsigned int);
struct vr_nexthop *ip4_default_nh;
/*
 * __vrouter_get_nexthop - raw lookup in a router's nexthop table.
 * No reference is taken. Returns NULL for a missing router or an
 * out-of-range index.
 */
struct vr_nexthop *
__vrouter_get_nexthop(struct vrouter *router, unsigned int index)
{
    struct vr_nexthop *nh = NULL;

    if (router && (index < router->vr_max_nexthops))
        nh = router->vr_nexthops[index];

    return nh;
}
/*
 * vrouter_get_nexthop - look up a nexthop by router id and table index,
 * taking a reference on it. Returns NULL when the nexthop does not exist.
 * The reference must be released with vrouter_put_nexthop().
 */
struct vr_nexthop *
vrouter_get_nexthop(unsigned int rid, unsigned int index)
{
    struct vr_nexthop *found;

    found = __vrouter_get_nexthop(vrouter_get(rid), index);
    if (found)
        found->nh_users++;

    return found;
}
/*
 * vrouter_put_nexthop - drop a reference on a nexthop, and free it when
 * the count reaches zero. For a composite nexthop, references on all
 * component nexthops are dropped (recursively) and the component array
 * is freed; a held interface reference is released as well.
 */
void
vrouter_put_nexthop(struct vr_nexthop *nh)
{
    int i;

    /* This function might get invoked with zero ref_cnt */
    if (nh->nh_users) {
        nh->nh_users--;
    }

    if (!nh->nh_users ) {
        /* let in-flight users on other cpus drain before tearing down;
         * skipped during module unload (vr_not_ready) */
        if (!vr_not_ready)
            vr_delay_op();
        /* If composite de-ref the internal nexthops */
        if (nh->nh_type == NH_COMPOSITE) {
            for (i = 0; i < nh->nh_component_cnt; i++) {
                if (nh->nh_component_nh[i].cnh)
                    vrouter_put_nexthop(nh->nh_component_nh[i].cnh);
            }
            vr_free(nh->nh_component_nh);
        }

        /* release the reference taken on the outgoing interface, if any */
        if (nh->nh_dev) {
            vrouter_put_interface(nh->nh_dev);
        }

        vr_free(nh);
    }

    return;
}
static int
vrouter_add_nexthop(struct vr_nexthop *nh)
{
struct vrouter *router = vrouter_get(nh->nh_rid);
if (!router || nh->nh_id >= router->vr_max_nexthops)
return -EINVAL;
/*
* NH change just copies the field
* over to nexthop, incase of change
* just return
*/
if (router->vr_nexthops[nh->nh_id])
return 0;
nh->nh_users++;
router->vr_nexthops[nh->nh_id] = nh;
return 0;
}
/*
 * nh_del - unhook a nexthop from its router's table and drop the
 * table's reference on it.
 */
static void
nh_del(struct vr_nexthop *nh)
{
    struct vrouter *router;

    router = vrouter_get(nh->nh_rid);
    if (!router)
        return;
    if (nh->nh_id >= router->vr_max_nexthops)
        return;

    if (router->vr_nexthops[nh->nh_id] != NULL)
        router->vr_nexthops[nh->nh_id] = NULL;

    vrouter_put_nexthop(nh);

    return;
}
/*
 * nh_resolve - handle a packet that hit a route needing resolution.
 * If the incoming interface has a bridge partner (vhost <-> physical),
 * a clone is cross-connected to it; the original packet is trapped to
 * agent so it can install a resolved route.
 */
static int
nh_resolve(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    struct vr_packet *dup_pkt;
    struct vr_vrf_stats *stats;

    stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
    if (stats)
        stats->vrf_resolves++;

    if (pkt->vp_if->vif_bridge) {
        /*
         * bridge is set only for vhost/physical interface, and this
         * path will be hit only for packets from vhost, in which case
         * we already know everything that has to be known i.e. we know
         * the outgoing device and the mac address (which was already
         * resolved as part of the arp request from host
         */
        dup_pkt = vr_pclone(pkt);
        if (dup_pkt != NULL) {
            vr_preset(dup_pkt);
            vif_xconnect(pkt->vp_if, dup_pkt);
        }
    }

    /* will trap the packet to agent to create a route */
    vr_trap(pkt, vrf, AGENT_TRAP_RESOLVE, NULL);

    return 0;
}
/*
 * nh_vxlan_vrf - hand a VXLAN-terminated packet to the bridge lookup,
 * using the VRF configured on this nexthop rather than the caller's.
 */
static int
nh_vxlan_vrf(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    struct vrouter *router = nh->nh_router;

    return vr_bridge_input(router, nh->nh_vrf, pkt, fmd);
}
/*
 * nh_rcv - receive nexthop: the packet is destined to the router
 * itself. Only the IPv4 family is handled; any other family is dropped
 * as an invalid protocol.
 */
static int
nh_rcv(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    struct vr_vrf_stats *stats;

    stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
    if (stats)
        stats->vrf_receives++;

    if (nh->nh_family != AF_INET) {
        vr_pfree(pkt, VP_DROP_INVALID_PROTOCOL);
        return 0;
    }

    return vr_ip_rcv(nh->nh_router, pkt, fmd);
}
/*
 * nh_push_mpls_header - prepend one MPLS label entry (with the
 * bottom-of-stack bit set) to the packet. The TTL is taken from the
 * packet, falling back to 64 when the packet carries none.
 *
 * Returns 0 on success, -ENOSPC when there is no head room left.
 */
static int
nh_push_mpls_header(struct vr_packet *pkt, unsigned int label)
{
    unsigned int ttl;
    unsigned int *hdr;

    hdr = (unsigned int *)pkt_push(pkt, sizeof(*hdr));
    if (!hdr)
        return -ENOSPC;

    /* Use the ttl from packet. If not ttl,
     * initialise to some arbitrary value */
    ttl = pkt->vp_ttl ? pkt->vp_ttl : 64;

    *hdr = htonl((label << VR_MPLS_LABEL_SHIFT) | VR_MPLS_STACK_BIT | ttl);

    return 0;
}
/*
* nh_udp_tunnel_helper - helper function to use for UDP tunneling. Used
* by mirroring and MPLS over UDP. Returns true on success, false otherwise.
*/
static bool
nh_udp_tunnel_helper(struct vr_packet *pkt, unsigned short sport,
unsigned short dport, unsigned int sip,
unsigned int dip)
{
struct vr_ip *ip;
struct vr_udp *udp;
/* Udp Header */
udp = (struct vr_udp *)pkt_push(pkt, sizeof(struct vr_udp));
if (!udp) {
return false;
}
udp->udp_sport = sport;
udp->udp_dport = dport;
udp->udp_length = htons(pkt_len(pkt));
udp->udp_csum = 0;
/* And now the IP header */
ip = (struct vr_ip *)pkt_push(pkt, sizeof(struct vr_ip));
if (!ip) {
return false;
}
ip->ip_version = 4;
ip->ip_hl = 5;
ip->ip_tos = 0;
ip->ip_id = htons(vr_generate_unique_ip_id());
ip->ip_frag_off = 0;
ip->ip_ttl = 64;
ip->ip_proto = VR_IP_PROTO_UDP;
ip->ip_saddr = sip;
ip->ip_daddr = dip;
ip->ip_len = htons(pkt_len(pkt));
/*
* header checksum
*/
ip->ip_csum = 0;
ip->ip_csum = vr_ip_csum(ip);
pkt_set_network_header(pkt, pkt->vp_data);
return true;
}
/*
 * nh_vxlan_tunnel_helper - prepend a VXLAN header (VNID taken from
 * fmd->fmd_label) followed by the outer UDP/IP headers via
 * nh_udp_tunnel_helper(). The UDP source port is a hash of the inner
 * headers when vr_get_udp_src_port is available, else the default.
 * Returns true on success, false otherwise.
 */
static bool
nh_vxlan_tunnel_helper(unsigned short vrf, struct vr_packet *pkt,
                       struct vr_forwarding_md *fmd, unsigned int sip,
                       unsigned int dip)
{
    unsigned short udp_src_port = VR_VXLAN_UDP_SRC_PORT;

    struct vr_vxlan *vxlanh;
    struct vr_packet *tmp_pkt;

    if (pkt_head_space(pkt) < VR_VXLAN_HDR_LEN) {
        tmp_pkt = vr_pexpand_head(pkt, VR_VXLAN_HDR_LEN - pkt_head_space(pkt));
        if (!tmp_pkt) {
            return false;
        }
        pkt = tmp_pkt;
    }

    /* Change the packet type to VXLAN as we added the vxlan header */
    pkt->vp_type = VP_TYPE_VXLAN;

    /*
     * The UDP source port is a hash of the inner headers
     */
    if (vr_get_udp_src_port) {
        udp_src_port = vr_get_udp_src_port(pkt, fmd, vrf);
        if (udp_src_port == 0) {
            return false;
        }
    }

    /* Add the vxlan header */
    vxlanh = (struct vr_vxlan *)pkt_push(pkt, sizeof(struct vr_vxlan));
    if (!vxlanh) {
        /*
         * BUG FIX: pkt_push was previously dereferenced unchecked. It
         * should not fail after the head-space check above, but a
         * failed push would otherwise be a NULL dereference.
         */
        return false;
    }
    vxlanh->vxlan_vnid = htonl(fmd->fmd_label << VR_VXLAN_VNID_SHIFT);
    vxlanh->vxlan_flags = htonl(VR_VXLAN_IBIT);

    return nh_udp_tunnel_helper(pkt, htons(udp_src_port),
            htons(VR_VXLAN_UDP_DST_PORT), sip, dip);
}
/*
 * nh_mcast_clone - clone a packet for multicast replication. The
 * clone's head room is grown by head_room bytes (copy-on-write) and the
 * original's TTL is carried over. Returns the clone, or NULL on failure
 * (the original packet is left untouched).
 */
static struct vr_packet *
nh_mcast_clone(struct vr_packet *pkt, unsigned short head_room)
{
    struct vr_packet *cloned;

    /* Clone the packet */
    cloned = vr_pclone(pkt);
    if (cloned == NULL)
        return NULL;

    /* Increase the head space by the head_room */
    if (vr_pcow(cloned, head_room) != 0) {
        vr_pfree(cloned, VP_DROP_PCOW_FAIL);
        return NULL;
    }

    /* Copy the ttl from old packet */
    cloned->vp_ttl = pkt->vp_ttl;

    return cloned;
}
/*
 * nh_composite_ecmp_validate_src - check whether the packet's source
 * still corresponds to the ECMP component recorded in the flow
 * (fmd->fmd_ecmp_src_nh_index).
 *
 * Returns NH_SOURCE_VALID when the recorded component vouches for the
 * source, NH_SOURCE_MISMATCH when a different valid component does
 * (the source appears to have moved), and NH_SOURCE_INVALID otherwise.
 */
static int
nh_composite_ecmp_validate_src(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    int i;
    struct vr_nexthop *cnh;

    /* the first few checks are straight forward */
    /* the uint8_t cast maps a -1 ("unset") index to 255, which then
     * fails the range check */
    if (!fmd || (uint8_t)fmd->fmd_ecmp_src_nh_index >= nh->nh_component_cnt)
        return NH_SOURCE_INVALID;

    cnh = nh->nh_component_nh[fmd->fmd_ecmp_src_nh_index].cnh;
    /* a component without a validate hook cannot vouch for the source */
    if (cnh && !cnh->nh_validate_src)
        return NH_SOURCE_INVALID;

    /*
     * when the 'supposed' source goes down, cnh is null, in which
     * case validate the source against other present nexthops. follow
     * the same logic if the component validate source returns invalid
     * source, which could mean that source has moved
     */
    if (!cnh ||
            (NH_SOURCE_INVALID == cnh->nh_validate_src(vrf, pkt, cnh, fmd))) {
        for (i = 0; i < nh->nh_component_cnt; i++) {
            if (i == fmd->fmd_ecmp_src_nh_index)
                continue;

            cnh = nh->nh_component_nh[i].cnh;
            /* If direct nexthop is not valid, dont process it */
            if (!cnh || !(cnh->nh_flags & NH_FLAG_VALID) ||
                    !cnh->nh_validate_src)
                continue;

            /*
             * if the source has moved to a present and valid source,
             * return mismatch
             */
            if ((NH_SOURCE_VALID == cnh->nh_validate_src(vrf, pkt, cnh, fmd)))
                return NH_SOURCE_MISMATCH;
        }

        /* if everything else fails, source is indeed invalid */
        return NH_SOURCE_INVALID;
    }

    /* source is validated by validate_src */
    return NH_SOURCE_VALID;
}
/*
 * nh_composite_ecmp - forward through the ECMP member selected by the
 * flow (fmd->fmd_ecmp_nh_index). A missing member (index -1 or a NULL
 * component) is trapped to agent for ECMP resolution; packets without
 * forwarding metadata or with an out-of-range index are dropped.
 */
static int
nh_composite_ecmp(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    int ret = 0;
    struct vr_nexthop *member_nh = NULL;
    struct vr_vrf_stats *stats;

    pkt->vp_type = VP_TYPE_IP;
    stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
    if (stats)
        stats->vrf_ecmp_composites++;

    /* the index is signed: -1 means "not yet chosen"; the signed cast
     * on the count keeps the comparison in signed arithmetic */
    if (!fmd || fmd->fmd_ecmp_nh_index >= (short)nh->nh_component_cnt)
        goto drop;

    if (fmd->fmd_ecmp_nh_index >= 0)
        member_nh = nh->nh_component_nh[fmd->fmd_ecmp_nh_index].cnh;

    /* no usable member: ask agent to pick one for this flow */
    if (!member_nh) {
        vr_trap(pkt, vrf, AGENT_TRAP_ECMP_RESOLVE, &fmd->fmd_flow_index);
        return 0;
    }

    /* label the chosen member should use for encapsulation */
    fmd->fmd_label = nh->nh_component_nh[fmd->fmd_ecmp_nh_index].cnh_label;

    return nh_output(vrf, pkt, member_nh, fmd);

drop:
    vr_pfree(pkt, VP_DROP_NO_FMD);
    return ret;
}
/*
 * nh_composite_mcast_validate_src - validate the tunnel source of an L2
 * or L3 multicast packet. Packets arriving from the fabric are accepted
 * only if their outer source address is one of the tunnel destinations
 * in this composite's fabric distribution tree.
 */
static int
nh_composite_mcast_validate_src(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    int i;
    struct vr_nexthop *dir_nh, *fabric_nh;
    unsigned int tun_dip;

    /*
     * If multicast packet is received on fabric interface, we need to
     * validate whether the source of this packet is in the distribution
     * tree. If source is not in the list, packet needs to be dropped.
     * It is assumed that L2 Multicast component's first member is
     * always Fabric nexthop. Nexthop should reorder properly even if
     * Agent adds in different order
     */
    /* packets not from the physical (fabric) interface are trusted */
    if (pkt->vp_if->vif_type != VIF_TYPE_PHYSICAL)
        return NH_SOURCE_VALID;

    if (!nh->nh_component_cnt)
        return NH_SOURCE_INVALID;

    /* the first component must be the valid fabric composite */
    fabric_nh = nh->nh_component_nh[0].cnh;
    if ((!fabric_nh) || (!(fabric_nh->nh_flags & NH_FLAG_VALID)) ||
            (fabric_nh->nh_type != NH_COMPOSITE) ||
            (!(fabric_nh->nh_flags & NH_FLAG_COMPOSITE_FABRIC))) {
        return NH_SOURCE_INVALID;
    }

    for (i = 0; i < fabric_nh->nh_component_cnt; i++) {
        dir_nh = fabric_nh->nh_component_nh[i].cnh;

        /* If direct nexthop is not valid, dont process it */
        if ((!dir_nh) || !(dir_nh->nh_flags & NH_FLAG_VALID))
            continue;

        if (dir_nh->nh_type != NH_TUNNEL)
            continue;

        tun_dip = 0;
        if (dir_nh->nh_flags & NH_FLAG_TUNNEL_GRE)
            tun_dip = dir_nh->nh_gre_tun_dip;
        else if (dir_nh->nh_flags & NH_FLAG_TUNNEL_UDP_MPLS)
            tun_dip = dir_nh->nh_udp_tun_dip;

        /* the outer source matches a member of the distribution tree */
        /* NOTE(review): fmd is dereferenced here without a NULL check;
         * callers appear to guarantee it is set -- confirm */
        if (tun_dip && fmd->fmd_outer_src_ip &&
                fmd->fmd_outer_src_ip == tun_dip)
            return NH_SOURCE_VALID;
    }

    return NH_SOURCE_INVALID;
}
/*
 * nh_composite_mcast_l2 - replicate an L2 multicast packet to the
 * composite's members: a clone goes to every local ENCAP member (except
 * the originating VM) and to the fabric composite. The original packet
 * is always dropped at the end (the clones carry the traffic).
 */
static int
nh_composite_mcast_l2(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    int i, clone_size = 0;
    struct vr_nexthop *dir_nh;
    unsigned short drop_reason, pkt_vrf;
    struct vr_packet *new_pkt;
    struct vr_vrf_stats *stats;

    stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
    if (stats)
        stats->vrf_l2_mcast_composites++;

    drop_reason = VP_DROP_CLONED_ORIGINAL;
    if (!fmd) {
        drop_reason = VP_DROP_NO_FMD;
        goto drop;
    }

    /* drop packets whose tunnel source is not in the distribution tree */
    if (nh->nh_validate_src) {
        if (nh->nh_validate_src(vrf, pkt, nh, fmd) == NH_SOURCE_INVALID) {
            drop_reason = VP_DROP_INVALID_MCAST_SOURCE;
            goto drop;
        }
    }

    /*
     * The packet can come to this nexthp either from Fabric or from VM.
     * Incase of Fabric, the packet would contain the Vxlan header and
     * control information. From VM, it contains neither of them
     */
    for (i = 0; i < nh->nh_component_cnt; i++) {
        dir_nh = nh->nh_component_nh[i].cnh;

        /* If direct nexthop is not valid, dont process it */
        if ((!dir_nh) || !(dir_nh->nh_flags & NH_FLAG_VALID))
            continue;

        if (dir_nh->nh_type == NH_ENCAP) {
            /* Dont give back the packet to same VM */
            if (dir_nh->nh_dev == pkt->vp_if)
                continue;

            /* There would be enought head space to clone it with zero
             * size */
            if (!(new_pkt = nh_mcast_clone(pkt, 0))) {
                drop_reason = VP_DROP_MCAST_CLONE_FAIL;
                break;
            }

            if (pkt->vp_if->vif_type == VIF_TYPE_PHYSICAL) {
                /*
                 * No need to give the below headers to ENCAP NH. So
                 * pull them off
                 */
                if (!pkt_pull(new_pkt, (VR_VXLAN_HDR_LEN +
                                VR_L2_MCAST_CTRL_DATA_LEN))) {
                    vr_pfree(new_pkt, VP_DROP_PULL);
                    break;
                }
            }
            /* deliver in the VRF of the member's interface */
            pkt_vrf = dir_nh->nh_dev->vif_vrf;
        } else if ((dir_nh->nh_type == NH_COMPOSITE) &&
                (dir_nh->nh_flags & NH_FLAG_COMPOSITE_FABRIC)) {
            /* Need to create extra head space only if packet is coming
             * from VM */
            if (vif_is_virtual(pkt->vp_if))
                clone_size = VR_L2_MCAST_PKT_HEAD_SPACE;

            /* Create head space for L2 Mcast header */
            if (!(new_pkt = nh_mcast_clone(pkt, clone_size))) {
                drop_reason = VP_DROP_MCAST_CLONE_FAIL;
                break;
            }
            pkt_vrf = vrf;
        } else {
            continue;
        }

        nh_output(pkt_vrf, new_pkt, dir_nh, fmd);
    }

    /* Original packet needs to be unconditionally dropped */
drop:
    vr_pfree(pkt, drop_reason);
    return 0;
}
/*
 * nh_composite_mcast_l3 - replicate an L3 multicast packet to the
 * composite's members: a clone (with head room for an ethernet header)
 * goes to every local ENCAP member except the originating VM, and a
 * clone with fabric head room goes to the fabric composite. The
 * original packet is always dropped at the end.
 */
static int
nh_composite_mcast_l3(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    struct vr_vrf_stats *stats;
    unsigned short drop_reason, pkt_vrf;
    struct vr_nexthop *dir_nh;
    struct vr_packet *new_pkt;
    int i;
    uint32_t mcast_clone_size = 0;

    drop_reason = VP_DROP_CLONED_ORIGINAL;
    stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
    if (stats)
        stats->vrf_l3_mcast_composites++;

    if (!fmd) {
        drop_reason = VP_DROP_NO_FMD;
        goto drop;
    }

    /* drop packets whose tunnel source is not in the distribution tree */
    if (nh->nh_validate_src) {
        if (nh->nh_validate_src(vrf, pkt, nh, fmd) == NH_SOURCE_INVALID) {
            drop_reason = VP_DROP_INVALID_MCAST_SOURCE;
            goto drop;
        }
    }

    for (i = 0; i < nh->nh_component_cnt; i++) {
        dir_nh = nh->nh_component_nh[i].cnh;

        /* If direct nexthop is not valid, dont process it */
        if ((!dir_nh) || !(dir_nh->nh_flags & NH_FLAG_VALID))
            continue;

        if (dir_nh->nh_type == NH_ENCAP) {
            /* Dont give back the packet to same VM */
            if (dir_nh->nh_dev == pkt->vp_if)
                continue;

            /* room for the L2 rewrite the encap member will add */
            mcast_clone_size = sizeof(struct vr_eth);
            pkt_vrf = dir_nh->nh_dev->vif_vrf;
        } else if ((dir_nh->nh_type == NH_COMPOSITE) &&
                (dir_nh->nh_flags & NH_FLAG_COMPOSITE_FABRIC)) {
            /* room for the tunnel headers the fabric path will add */
            mcast_clone_size = VR_L3_MCAST_PKT_HEAD_SPACE;
            pkt_vrf = vrf;
        } else {
            continue;
        }

        if (!(new_pkt = nh_mcast_clone(pkt, mcast_clone_size))) {
            drop_reason = VP_DROP_MCAST_CLONE_FAIL;
            break;
        }

        /* per-member label for the encapsulation downstream */
        fmd->fmd_label = nh->nh_component_nh[i].cnh_label;
        nh_output(pkt_vrf, new_pkt, dir_nh, fmd);
    }

    /* Original packet needs to be unconditionally dropped */
drop:
    vr_pfree(pkt, drop_reason);
    return 0;
}
/*
 * nh_composite_fabric - replicate a multicast packet to every tunnel
 * component (GRE or MPLS-over-UDP) of the fabric composite, skipping
 * the tunnel whose destination equals the packet's outer source. An L2
 * multicast packet originated by a local VM additionally gets a VXLAN
 * header (VNID from fmd->fmd_label) and the L2 multicast control data.
 * The original packet is always dropped at the end.
 */
static int
nh_composite_fabric(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    int i;
    struct vr_vrf_stats *stats;
    struct vr_nexthop *dir_nh;
    unsigned short drop_reason;
    struct vr_packet *new_pkt;
    unsigned int dip, sip;
    int32_t label;

    drop_reason = VP_DROP_CLONED_ORIGINAL;
    stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
    if (stats)
        stats->vrf_fabric_composites++;

    if (!fmd) {
        drop_reason = VP_DROP_NO_FMD;
        goto drop;
    }

    /*
     * Packet can be L2 or L3 with or without control information. It is
     * always ensured before coming to this nexthop that packet headers
     * along with control inforation is in first buffer. So it can be
     * safely cow'd for the required length
     */
    label = fmd->fmd_label;

    for (i = 0; i < nh->nh_component_cnt; i++) {
        dir_nh = nh->nh_component_nh[i].cnh;

        /* If direct nexthop is not valid, dont process it */
        if ((!dir_nh) || !(dir_nh->nh_flags & NH_FLAG_VALID))
            continue;

        if (dir_nh->nh_type != NH_TUNNEL)
            continue;

        /*
         * BUG FIX: a tunnel component without an outgoing device was
         * previously only logged here, after which the code went on to
         * dereference dir_nh->nh_dev anyway (a guaranteed NULL
         * dereference). Log the inconsistency and skip the component.
         */
        if (!dir_nh->nh_dev) {
            struct vr_nexthop *sub_nh_db, *parent_nh_db;

            sub_nh_db = __vrouter_get_nexthop(vrouter_get(dir_nh->nh_rid),
                    dir_nh->nh_id);
            parent_nh_db = __vrouter_get_nexthop(vrouter_get(nh->nh_rid),
                    nh->nh_id);
            vr_printf("Sub_nh ptr %p id %d type %d flags 0x%x ref_cnt %d"
                    " NHdb_ptr %p nh_validate_src %p nh_reach_nh %p "
                    " nh_data_size %d Parent_nh ptr %p id %d type %d flags 0x%x"
                    " ref_cnt %d NHdb_ptr %p component_cnt %d component id %d \n",
                    dir_nh, dir_nh->nh_id, dir_nh->nh_type, dir_nh->nh_flags,
                    dir_nh->nh_users, sub_nh_db, dir_nh->nh_validate_src,
                    dir_nh->nh_reach_nh, dir_nh->nh_data_size, nh, nh->nh_id,
                    nh->nh_type, nh->nh_flags, nh->nh_users, parent_nh_db,
                    nh->nh_component_cnt, i);
            continue;
        }

        /*
         * Take the right tunnel source. The dst is also our own
         * address
         */
        sip = dip = 0;
        if (dir_nh->nh_flags & NH_FLAG_TUNNEL_GRE) {
            sip = dir_nh->nh_gre_tun_sip;
            dip = dir_nh->nh_gre_tun_dip;
        } else if (dir_nh->nh_flags & NH_FLAG_TUNNEL_UDP_MPLS) {
            sip = dir_nh->nh_udp_tun_sip;
            dip = dir_nh->nh_udp_tun_dip;
        } else {
            drop_reason = VP_DROP_INVALID_NH;
            break;
        }

        /* Dont forward to same source */
        if (fmd->fmd_outer_src_ip && fmd->fmd_outer_src_ip == dip)
            continue;

        /*
         * Enough head spaces are created in the previous nexthop
         * handling. Just cow the packet with zero size to get different
         * buffer space
         */
        new_pkt = nh_mcast_clone(pkt, 0);
        if (!new_pkt) {
            drop_reason = VP_DROP_MCAST_CLONE_FAIL;
            break;
        }

        /*
         * If L2 multicast packet from VM, we need to update Vxlan
         * with right values
         */
        if ((new_pkt->vp_type == VP_TYPE_L2) &&
                (new_pkt->vp_flags & VP_FLAG_MULTICAST) &&
                (vif_is_virtual(new_pkt->vp_if))) {
            /*
             * The L2 multicast bridge entry will have VNID as label. If fmd
             * does not valid label/vnid, skip the processing
             */
            if (label < 0) {
                vr_pfree(new_pkt, VP_DROP_INVALID_LABEL);
                break;
            }

            /*
             * Add vxlan encapsulation. The vxlan id need to be taken
             * from Bridge entry. Both addresses passed are sip: per the
             * comment above, the destination here is our own address.
             */
            fmd->fmd_label = label;
            if (nh_vxlan_tunnel_helper(dir_nh->nh_dev->vif_vrf,
                        new_pkt, fmd, sip, sip) == false) {
                vr_pfree(new_pkt, VP_DROP_PUSH);
                break;
            }

            if (vr_l2_mcast_control_data_add(new_pkt) == false) {
                vr_pfree(new_pkt, VP_DROP_PUSH);
                break;
            }
        }

        /* MPLS label for outer header encapsulation */
        fmd->fmd_label = nh->nh_component_nh[i].cnh_label;
        nh_output(dir_nh->nh_dev->vif_vrf, new_pkt, dir_nh, fmd);
    }

    /* Original packet needs to be unconditionally dropped */
drop:
    vr_pfree(pkt, drop_reason);
    return 0;
}
/*
 * nh_composite_multi_proto - dispatch a multicast packet to the L2 or
 * L3 sub-composite. The packet is classified by peeking at its first
 * word: the L2 multicast control data marker means L2, anything else is
 * treated as L3. The packet is forwarded to the first component whose
 * flags match the classification; if none matches it is dropped.
 */
static int
nh_composite_multi_proto(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    uint32_t *ctrl_data;
    unsigned short drop_reason;
    struct vr_vrf_stats *stats;
    unsigned short pkt_type_flag;
    int i;
    struct vr_nexthop *dir_nh;

    stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
    if (stats)
        stats->vrf_multi_proto_composites++;

    if (!fmd) {
        drop_reason = VP_DROP_NO_FMD;
        goto drop;
    }

    /* Mark the packet as Multicast */
    pkt->vp_flags |= VP_FLAG_MULTICAST;

    /* Identify whether L2 or L3 packet */
    /* NOTE(review): assumes at least 4 readable bytes at pkt_data();
     * callers presumably guarantee the headers are in the first
     * buffer -- confirm */
    pkt_type_flag = NH_FLAG_COMPOSITE_L2;
    ctrl_data = (uint32_t *)pkt_data(pkt);
    if (*ctrl_data != VR_L2_MCAST_CTRL_DATA) {
        pkt_type_flag = NH_FLAG_COMPOSITE_L3;
        pkt->vp_type = VP_TYPE_IP;
    } else {
        /* Mark the packet as L2. Let the control information flow till
         * the L2 mcast nexthop */
        pkt->vp_type = VP_TYPE_L2;
    }

    /*
     * Look for the same nexthop flags and forward to the first nexthop
     */
    for(i = 0; i < nh->nh_component_cnt; i++) {
        /* If direct nexthop is not valid, dont process it */
        dir_nh = nh->nh_component_nh[i].cnh;
        if ((!dir_nh) || (!(dir_nh->nh_flags & NH_FLAG_VALID)))
            continue;

        if (dir_nh->nh_flags & pkt_type_flag) {
            nh_output(vrf, pkt, dir_nh, fmd);
            return 0;
        }
    }

    drop_reason = VP_DROP_INVALID_NH;

drop:
    vr_pfree(pkt, drop_reason);
    return 0;
}
static int
nh_discard(unsigned short vrf, struct vr_packet *pkt,
struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
struct vr_vrf_stats *stats;
stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
if (stats)
stats->vrf_discards++;
vr_pfree(pkt, VP_DROP_DISCARD);
return 0;
}
/*
 * nh_udp_tunnel - encapsulate the packet in a plain UDP/IP tunnel with
 * the addresses and ports configured on the nexthop (used e.g. for
 * mirroring), compute the partial UDP checksum, and forward the result.
 * Failures drop the packet with VP_DROP_PUSH.
 */
static int
nh_udp_tunnel(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    struct vr_packet *tmp;
    struct vr_ip *ip;
    struct vr_udp *udp;
    struct vr_vrf_stats *stats;

    if (!fmd)
        goto send_fail;

    /* make sure there is room for the outer UDP/IP headers */
    if (pkt_head_space(pkt) < VR_UDP_HEAD_SPACE) {
        tmp = vr_palloc_head(pkt, VR_UDP_HEAD_SPACE);
        if (!tmp)
            goto send_fail;
        pkt = tmp;
        if (!pkt_reserve_head_space(pkt, VR_UDP_HEAD_SPACE))
            goto send_fail;
    }

    if (nh_udp_tunnel_helper(pkt, nh->nh_udp_tun_sport,
                nh->nh_udp_tun_dport, nh->nh_udp_tun_sip,
                nh->nh_udp_tun_dip) == false) {
        goto send_fail;
    }

    /*
     * ip_len is a 16 bit field: anything above 65535 cannot be
     * represented in the outer header. (BUG FIX: the old check used
     * '> (1 << 16)', which let a length of exactly 65536 through and
     * htons() would have wrapped it to 0.)
     */
    if (pkt_len(pkt) > ((1 << (sizeof(ip->ip_len) * 8)) - 1))
        goto send_fail;

    /*
     * Incase of mirroring set the inner network header to the newly added
     * header so that this is fragmented and checksummed
     */
    pkt_set_inner_network_header(pkt, pkt->vp_data);

    /*
     * Calculate the partial checksum for udp header
     */
    ip = (struct vr_ip *)(pkt_data(pkt));
    udp = (struct vr_udp *)((char *)ip + ip->ip_hl * 4);
    udp->udp_csum = vr_ip_partial_csum(ip);

    stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
    if (stats)
        stats->vrf_udp_tunnels++;

    vr_forward(vrouter_get(nh->nh_rid),
            (vrf == (unsigned short)-1) ? fmd->fmd_dvrf : vrf,
            pkt, fmd);

    return 0;

send_fail:
    vr_pfree(pkt, VP_DROP_PUSH);
    return 0;
}
/*
 * nh_vxlan_tunnel - tunnel packet with VXLAN header (VNID from
 * fmd->fmd_label), then slap the L2 rewrite from the nexthop and
 * transmit on the nexthop's device. Packets without a valid label are
 * handed back to vr_forward(); oversized DF packets are trapped to
 * agent for ICMP "fragmentation needed" handling.
 */
static int
nh_vxlan_tunnel(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    struct vr_interface *vif;
    struct vr_vrf_stats *stats;
    unsigned short reason = VP_DROP_PUSH;
    struct vr_packet *tmp_pkt;
    struct vr_df_trap_arg trap_arg;

    stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
    /* NOTE(review): increments the MPLS-over-UDP counter rather than a
     * VXLAN-specific one -- looks copy-pasted; confirm intended */
    if (stats)
        stats->vrf_udp_mpls_tunnels++;

    if (!fmd || fmd->fmd_label < 0)
        return vr_forward(nh->nh_router, vrf, pkt, fmd);

    if (vr_perfs)
        pkt->vp_flags |= VP_FLAG_GSO;

    /* don't-fragment handling: trap to agent so it can emit ICMP */
    if (pkt->vp_type == VP_TYPE_IP) {
        if (vr_has_to_fragment(nh->nh_dev, pkt, VR_VXLAN_HDR_LEN) &&
                vr_ip_dont_fragment_set(pkt)) {
            trap_arg.df_mtu = vif_get_mtu(nh->nh_dev) - VR_VXLAN_HDR_LEN;
            trap_arg.df_flow_index = fmd->fmd_flow_index;
            return vr_trap(pkt, vrf, AGENT_TRAP_HANDLE_DF, (void *)&trap_arg);
        }
    }

    /* NOTE(review): if the helper has to expand the packet head, its
     * internal pkt replacement is not visible here and the pointer used
     * below could be stale -- confirm vr_pexpand_head semantics */
    if (nh_vxlan_tunnel_helper(vrf, pkt, fmd, nh->nh_udp_tun_sip,
                nh->nh_udp_tun_dip) == false) {
        goto send_fail;
    }

    /* ensure head room for the L2 encapsulation */
    if (pkt_head_space(pkt) < nh->nh_udp_tun_encap_len) {
        tmp_pkt = vr_pexpand_head(pkt, nh->nh_udp_tun_encap_len - pkt_head_space(pkt));
        if (!tmp_pkt) {
            goto send_fail;
        }
        pkt = tmp_pkt;
    }

    /* slap l2 header */
    vif = nh->nh_dev;
    if (!vif->vif_set_rewrite(vif, pkt, nh->nh_data,
                nh->nh_udp_tun_encap_len)) {
        goto send_fail;
    }

    vif->vif_tx(vif, pkt);

    return 0;

send_fail:
    vr_pfree(pkt, reason);
    return 0;
}
/*
 * nh_mpls_udp_tunnel_validate_src - a tunnel source is valid only when
 * the packet's outer source address equals this nexthop's tunnel
 * destination, i.e. the peer we would be sending to.
 */
static int
nh_mpls_udp_tunnel_validate_src(unsigned short vrf, struct vr_packet *pkt,
        struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
    return (fmd->fmd_outer_src_ip == nh->nh_udp_tun_dip) ?
        NH_SOURCE_VALID : NH_SOURCE_INVALID;
}
/*
* nh_mpls_udp_tunnel - tunnel packet with MPLS label in UDP.
*/
static int
nh_mpls_udp_tunnel(unsigned short vrf, struct vr_packet *pkt,
struct vr_nexthop *nh, struct vr_forwarding_md *fmd)
{
unsigned char *tun_encap;
struct vr_interface *vif;
struct vr_vrf_stats *stats;
unsigned int tun_sip, tun_dip, head_space;
__u16 tun_encap_len, udp_src_port = VR_MPLS_OVER_UDP_SRC_PORT;
unsigned short reason = VP_DROP_PUSH;
struct vr_packet *tmp_pkt;
struct vr_df_trap_arg trap_arg;
/*
* If we are testing MPLS over UDP using the vr_mudp sysctl, use the
* values from the GRE tunnel nexthop below. Otherwise, use the values
* from the UDP tunnel nexthop.
*/
if (vr_mudp) {
tun_sip = nh->nh_gre_tun_sip;
tun_dip = nh->nh_gre_tun_dip;
tun_encap_len = nh->nh_gre_tun_encap_len;
} else {
tun_sip = nh->nh_udp_tun_sip;
tun_dip = nh->nh_udp_tun_dip;
tun_encap_len = nh->nh_udp_tun_encap_len;
}
stats = vr_inet_vrf_stats(vrf, pkt->vp_cpu);
if (stats)
stats->vrf_udp_mpls_tunnels++;
if (!fmd || fmd->fmd_label < 0)
return vr_forward(nh->nh_router, vrf, pkt, fmd);
/*
* The UDP source port is a hash of the inner IP src/dst address and vrf
*/
if (vr_get_udp_src_port) {
udp_src_port = vr_get_udp_src_port(pkt, fmd, vrf);
if (udp_src_port == 0) {
reason = VP_DROP_PULL;
goto send_fail;
}
}
/* Calculate the head space for mpls,udp ip and eth */
head_space = VR_MPLS_HDR_LEN + sizeof(struct vr_ip) + sizeof(struct vr_udp);