#include <rte_config.h>
#include <rte_hash_crc.h>

#include <tas_memif.h>
#include <utils_sync.h>

#include "tcp_common.h"

#define TCP_MAX_RTT 100000

struct flow_key {
  beui32_t local_ip;
  beui32_t remote_ip;
  beui16_t local_port;
  beui16_t remote_port;
} __attribute__((packed));
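/* Per-flow lock helpers: depending on the build, fs_lock/fs_unlock either wrap
 * the spinlock embedded in the flow state (util_spin_lock on fs->lock) or
 * compile away to no-ops when the fast path runs without flow-state locking. */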
#define fs_lock(fs) util_spin_lock(&fs->lock)
#define fs_unlock(fs) util_spin_unlock(&fs->lock)
/* ... */
#define fs_lock(fs) do {} while (0)
#define fs_unlock(fs) do {} while (0)

static void flow_tx_read(struct flextcp_pl_flowst *fs, uint32_t pos,
    uint16_t len, void *dst);
static void flow_rx_write(struct flextcp_pl_flowst *fs, uint32_t pos,
    uint16_t len, const void *src);
#ifdef FLEXNIC_PL_OOO_RECV
static void flow_rx_seq_write(struct flextcp_pl_flowst *fs, uint32_t seq,
    uint16_t len, const void *src);
#endif
static void flow_tx_segment(struct dataplane_context *ctx,
    struct network_buf_handle *nbh, struct flextcp_pl_flowst *fs,
    uint32_t seq, uint32_t ack, uint32_t rxwnd, uint16_t payload,
    uint32_t payload_pos, uint32_t ts_echo, uint32_t ts_my, uint8_t fin);
static void flow_tx_ack(struct dataplane_context *ctx, uint32_t seq,
    uint32_t ack, uint32_t rxwnd, uint32_t echo_ts, uint32_t my_ts,
    struct network_buf_handle *nbh, struct tcp_opts *opts);
static inline void tcp_checksums(struct network_buf_handle *nbh,
    struct pkt_tcp *p, beui32_t ip_s, beui32_t ip_d, uint16_t l3_paylen);
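/* Prefetch pass over a batch of queue-manager entries: touching each flow
 * state here hides the cache-miss latency before the entries are processed. */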
  for (i = 0; i < n; i++) {
    rte_prefetch0(&fp_state->flowst[queues[i]]);
  }
  for (i = 0; i < n; i++) {
    fs = &fp_state->flowst[queues[i]];
    /* ... */
    rte_prefetch0(p + 64);
  }
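/* fast_flows_qman: invoked when the queue manager schedules a flow for
 * transmission. If the flow group has been re-steered to another core, the
 * flow is handed off via that core's qman_fwd_ring and the local queue entry
 * is cleared; otherwise one segment of at most TCP_MSS bytes of available
 * payload is built and sent. */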
    struct network_buf_handle *nbh, uint32_t ts)
{
  uint32_t flow_id = queue;
  struct flextcp_pl_flowst *fs = &fp_state->flowst[flow_id];
  uint32_t avail, len, tx_pos, tx_seq, ack, rx_wnd;

  new_core = fp_state->flow_group_steering[fs->flow_group];
  if (new_core != ctx->id) {
    /* flow now belongs to a different core: forward it */
    if (rte_ring_enqueue(ctxs[new_core]->qman_fwd_ring, fs) != 0) {
      fprintf(stderr, "fast_flows_qman: rte_ring_enqueue failed\n");
    }
    /* clear the queue manager entry for this flow on the local core */
    if (qman_set(&ctx->qman, flow_id, 0, 0, 0,
          QMAN_SET_RATE | QMAN_SET_MAXCHUNK | QMAN_SET_AVAIL) != 0)
    {
      fprintf(stderr, "fast_flows_qman: qman_set clear failed, UNEXPECTED\n");
    }

    notify_fastpath_core(new_core);
  avail = tcp_txavail(fs, NULL);

  fprintf(stderr, "ATX try_sendseg local=%08x:%05u remote=%08x:%05u "
      "tx_avail=%x tx_next_pos=%x avail=%u\n",
      f_beui32(fs->local_ip), f_beui16(fs->local_port),
      f_beui32(fs->remote_ip), f_beui16(fs->remote_port),
      /* ... */);

#ifdef FLEXNIC_TRACING
  /* ... */
  trace_event(FLEXNIC_PL_TREV_AFLOQMAN, sizeof(te_afloqman), &te_afloqman);
#endif
  len = MIN(avail, TCP_MSS);

  fin = (fs->rx_base_sp & FLEXNIC_PL_FLOWST_TXFIN) == FLEXNIC_PL_FLOWST_TXFIN &&
      /* ... */;

  flow_tx_segment(ctx, nbh, fs, tx_seq, ack, rx_wnd, len, tx_pos,
      /* ... */);
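/* fast_flows_qman_fwd: handles flows that another core pushed onto this
 * core's qman_fwd_ring, re-registering them with the local queue manager
 * using the flow's configured rate and currently available payload. */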
  uint16_t flow_id = fs - fp_state->flowst;

  avail = tcp_txavail(fs, NULL);

  if (qman_set(&ctx->qman, flow_id, fs->tx_rate, avail, TCP_MSS,
        QMAN_SET_RATE | QMAN_SET_MAXCHUNK | QMAN_SET_AVAIL) != 0)
  {
    fprintf(stderr, "fast_flows_qman_fwd: qman_set failed, UNEXPECTED\n");
  }
    struct network_buf_handle **nbhs, void **fss, struct tcp_opts *tos,
    uint16_t n)
{
  for (i = 0; i < n; i++) {
    p = network_buf_bufoff(nbhs[i]);
    len = network_buf_len(nbhs[i]);

    /* ... */
        (f_beui16(p->eth.type) != ETH_TYPE_IP) |
        (p->ip.proto != IP_PROTO_TCP) |
        (IPH_V(&p->ip) != 4) |
        (IPH_HL(&p->ip) != 5) |
        (TCPH_HDRLEN(&p->tcp) < 5) |
        (len < f_beui16(p->ip.len) + sizeof(p->eth)) |
        (tcp_parse_options(p, len, &tos[i]) != 0) |
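/* Receive-buffer prefetch pass: rx_base_sp packs the DMA base address of the
 * flow's receive buffer together with flag bits, so the address is recovered
 * by masking with FLEXNIC_PL_FLOWST_RX_MASK before the buffer is touched. */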
    void **fss, uint16_t n)
{
  for (i = 0; i < n; i++) {
    /* ... */
    rx_base = fs->rx_base_sp & FLEXNIC_PL_FLOWST_RX_MASK;
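/* fast_flows_packet: per-packet receive processing for an established flow.
 * It bails out to the slow path for flows marked FLEXNIC_PL_FLOWST_SLOWPATH
 * or for unexpected TCP flags, validates ACK and sequence numbers, copies
 * in-order payload into the receive buffer (and, with FLEXNIC_PL_OOO_RECV,
 * buffers a single contiguous out-of-order interval), samples the RTT from
 * the timestamp option, notifies the application, and re-credits the queue
 * manager with any transmit window the ACK opened. */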
    struct network_buf_handle *nbh, void *fsp, struct tcp_opts *opts,
    uint32_t ts)
{
  struct pkt_tcp *p = network_buf_bufoff(nbh);
  uint32_t payload_bytes, payload_off, seq, ack, old_avail, new_avail,
      orig_payload;
  uint32_t rx_bump = 0, tx_bump = 0, rx_pos, rtt;
  int no_permanent_sp = 0;
  uint16_t tcp_extra_hlen, trim_start, trim_end;
  uint16_t flow_id = fs - fp_state->flowst;
  int trigger_ack = 0, fin_bump = 0;
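  /* Payload location: tcp_extra_hlen is the TCP options length implied by the
   * data offset, payload_off the byte offset of the payload within the frame,
   * and payload_bytes the IP total length minus IP header, base TCP header
   * and options. For example, ip.len = 1500 with a 32-byte TCP header (12
   * bytes of options) gives 1500 - 20 - 20 - 12 = 1448 payload bytes. */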
  tcp_extra_hlen = (TCPH_HDRLEN(&p->tcp) - 5) * 4;
  payload_off = sizeof(*p) + tcp_extra_hlen;
  payload_bytes =
      f_beui16(p->ip.len) - (sizeof(p->ip) + sizeof(p->tcp) + tcp_extra_hlen);
  orig_payload = payload_bytes;
  fprintf(stderr, "FLOW local=%08x:%05u remote=%08x:%05u RX: seq=%u ack=%u "
      "flags=%x payload=%u\n",
      f_beui32(p->ip.dest), f_beui16(p->tcp.dest),
      f_beui32(p->ip.src), f_beui16(p->tcp.src), f_beui32(p->tcp.seqno),
      f_beui32(p->tcp.ackno), TCPH_FLAGS(&p->tcp), payload_bytes);
#ifdef FLEXNIC_TRACING
  /* ... */
      .local_ip = f_beui32(p->ip.dest),
      .remote_ip = f_beui32(p->ip.src),
      .local_port = f_beui16(p->tcp.dest),
      .remote_port = f_beui16(p->tcp.src),
      .flow_seq = f_beui32(p->tcp.seqno),
      .flow_ack = f_beui32(p->tcp.ackno),
      .flow_flags = TCPH_FLAGS(&p->tcp),
      .flow_len = payload_bytes,
  /* ... */
  trace_event(FLEXNIC_PL_TREV_RXFS, sizeof(te_rxfs), &te_rxfs);
#endif
  fprintf(stderr, "FLOW local=%08x:%05u remote=%08x:%05u ST: op=%"PRIx64
      " rx_pos=%x rx_next_seq=%u rx_avail=%x tx_pos=%x tx_next_seq=%u"
      " tx_sent=%u sp=%u\n",
      f_beui32(p->ip.dest), f_beui16(p->tcp.dest),
      /* ... */);
  if (UNLIKELY((fs->rx_base_sp & FLEXNIC_PL_FLOWST_SLOWPATH) != 0)) {
    fprintf(stderr, "dma_krx_pkt_fastpath: slowpath because of state\n");

  /* only ACK/PSH/ECE/CWR/FIN are handled on the fast path; anything else
   * (e.g. SYN or RST) goes to the slow path */
  if (UNLIKELY((TCPH_FLAGS(&p->tcp) & ~(TCP_ACK | TCP_PSH | TCP_ECE | TCP_CWR |
          TCP_FIN)) != 0))
  {
    if ((TCPH_FLAGS(&p->tcp) & TCP_SYN) != 0) {
      /* ... */
    }
    fprintf(stderr, "dma_krx_pkt_fastpath: slow path because of flags (%x)\n",
        TCPH_FLAGS(&p->tcp));
  old_avail = tcp_txavail(fs, NULL);

  seq = f_beui32(p->tcp.seqno);
  ack = f_beui32(p->tcp.ackno);

  if (payload_bytes > 0)
    /* ... */;

  if ((TCPH_FLAGS(&p->tcp) & TCP_ACK) == TCP_ACK) {
    /* ... */
  }

  /* process the acknowledgement if it is valid */
  if (LIKELY((TCPH_FLAGS(&p->tcp) & TCP_ACK) == TCP_ACK &&
        tcp_valid_rxack(fs, ack, &tx_bump) == 0))
  {
    if ((TCPH_FLAGS(&p->tcp) & TCP_ECE) == TCP_ECE) {
      /* ... */
    }

    if (LIKELY(tx_bump <= fs->tx_sent)) {
      /* ... */
    }
#ifdef ALLOW_FUTURE_ACKS
    /* ... */
#endif
      fprintf(stderr, "dma_krx_pkt_fastpath: acked more bytes than sent\n");

    if (UNLIKELY(tx_bump != 0)) {
      /* ... */
    } else if (UNLIKELY(orig_payload == 0 && ++fs->rx_dupack_cnt >= 3)) {
      /* third duplicate ACK in a row: roll back and retransmit */
      flow_reset_retransmit(fs);
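  /* Out-of-order receive handling (FLEXNIC_PL_OOO_RECV): data that arrives
   * beyond rx_next_seq is trimmed against the receive buffer and tracked as
   * one contiguous interval [rx_ooo_start, rx_ooo_start + rx_ooo_len). A new
   * segment is accepted only if the interval is empty or the segment directly
   * prepends or appends to it; the interval is merged into the in-order
   * stream later, once rx_next_seq catches up to rx_ooo_start. */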
#ifdef FLEXNIC_PL_OOO_RECV
    if (UNLIKELY(tcp_trim_rxbuf(fs, seq, payload_bytes, &trim_start, &trim_end) != 0)) {
      /* ... */
    }

    payload_bytes -= trim_start + trim_end;
    payload_off += trim_start;
    payload = (uint8_t *) p + payload_off;

    if (payload_bytes == 0) {
      /* ... */
    }

    if (fs->rx_ooo_len == 0) {
      fs->rx_ooo_start = seq;
      fs->rx_ooo_len = payload_bytes;
      flow_rx_seq_write(fs, seq, payload_bytes, payload);
    } else if (seq + payload_bytes == fs->rx_ooo_start) {
      /* new segment directly precedes the existing interval */
      fs->rx_ooo_start = seq;
      fs->rx_ooo_len += payload_bytes;
      flow_rx_seq_write(fs, seq, payload_bytes, payload);
    } else if (fs->rx_ooo_start + fs->rx_ooo_len == seq) {
      /* new segment directly follows the existing interval */
      fs->rx_ooo_len += payload_bytes;
      flow_rx_seq_write(fs, seq, payload_bytes, payload);
  if (tcp_valid_rxseq(fs, seq, payload_bytes, &trim_start, &trim_end) != 0) {
    fprintf(stderr, "dma_krx_pkt_fastpath: packet with bad seq "
        "(got %u, expect %u, avail %u, payload %u)\n", seq, fs->rx_next_seq,
        /* ... */);

  payload_bytes -= trim_start + trim_end;
  payload_off += trim_start;
  payload = (uint8_t *) p + payload_off;
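  /* RTT sampling from the TCP timestamp option: the echoed ts_ecr is
   * subtracted from the current timestamp, samples above TCP_MAX_RTT are
   * discarded as implausible, and the rest update the flow's rtt_est (the
   * first sample seeds the estimate, later samples refine it). */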
  if (LIKELY((TCPH_FLAGS(&p->tcp) & TCP_ACK) == TCP_ACK &&
        f_beui32(opts->ts->ts_ecr) != 0))
  {
    rtt = ts - f_beui32(opts->ts->ts_ecr);
    if (rtt < TCP_MAX_RTT) {
      if (LIKELY(fs->rtt_est != 0)) {
  if ((fs->rx_base_sp & FLEXNIC_PL_FLOWST_RXFIN) == FLEXNIC_PL_FLOWST_RXFIN &&
      /* ... */)
  {
    fprintf(stderr, "fast_flows_packet: data after FIN dropped\n");
  }

  if (payload_bytes > 0) {
    flow_rx_write(fs, fs->rx_next_pos, payload_bytes, payload);
    rx_bump = payload_bytes;
#ifdef FLEXNIC_PL_OOO_RECV
    if (UNLIKELY(fs->rx_ooo_len != 0)) {
      if (tcp_trim_rxbuf(fs, fs->rx_ooo_start, fs->rx_ooo_len, &trim_start,
            &trim_end) != 0) {
        /* ... */
      }

      fs->rx_ooo_start += trim_start;
      fs->rx_ooo_len -= trim_start + trim_end;

      if (fs->rx_ooo_len > 0 && fs->rx_ooo_start == fs->rx_next_seq) {
        /* buffered out-of-order data is now in sequence: account for it */
        rx_bump += fs->rx_ooo_len;
  if ((TCPH_FLAGS(&p->tcp) & TCP_FIN) == TCP_FIN &&
      /* ... */)
  {
    if (fs->rx_next_seq == f_beui32(p->tcp.seqno) + orig_payload &&
        !fs->rx_ooo_len)
    {
      /* FIN is in sequence and no data is outstanding: mark end of stream */
      /* ... */
    } else {
      fprintf(stderr, "fast_flows_packet: ignored fin because out of order\n");
  if (LIKELY(rx_bump != 0 || tx_bump != 0 || fin_bump)) {

    fprintf(stderr, "dma_krx_pkt_fastpath: updating application state\n");

    type = FLEXTCP_PL_ARX_CONNUPDATE;

    type |= FLEXTCP_PL_ARX_FLRXDONE << 8;

#ifdef FLEXNIC_TRACING
    /* ... */
        .local_ip = f_beui32(p->ip.dest),
        .remote_ip = f_beui32(p->ip.src),
        .local_port = f_beui16(p->tcp.dest),
        .remote_port = f_beui16(p->tcp.src),
    /* ... */
    trace_event(FLEXNIC_PL_TREV_ARX, sizeof(te_arx), &te_arx);
#endif

    arx_cache_add(ctx, fs->db_id, fs->opaque, rx_bump, rx_pos, tx_bump, type);
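  /* any transmit window opened up by this packet (new_avail > old_avail) is
   * credited back to the queue manager so the flow gets scheduled again */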
  new_avail = tcp_txavail(fs, NULL);
  if (new_avail > old_avail) {
    if (qman_set(&ctx->qman, flow_id, fs->tx_rate, new_avail - old_avail,
          TCP_MSS, QMAN_SET_RATE | QMAN_SET_MAXCHUNK | QMAN_ADD_AVAIL) != 0)
    {
      fprintf(stderr, "fast_flows_packet: qman_set 1 failed, UNEXPECTED\n");
    }
  }

  if (!no_permanent_sp) {
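/* fast_flows_bump: applies a transmit/receive "bump" issued by the
 * application through its TX queue. The 16-bit bump_seq is compared with the
 * last seen value using wrap-around arithmetic (e.g. a bump_seq of 0x0003
 * following 0xfff0 is treated as newer) so stale or reordered bumps are
 * ignored; bumps that would overrun the buffers or touch an already closed
 * TX direction are rejected, and any newly available transmit payload is
 * credited to the queue manager. */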
    uint16_t bump_seq, uint32_t rx_bump, uint32_t tx_bump, uint8_t flags,
    struct network_buf_handle *nbh, uint32_t ts)
{
  uint32_t rx_avail_prev, old_avail, new_avail, tx_avail;

#ifdef FLEXNIC_TRACING
  /* ... */
      .bump_seq_ent = bump_seq,
      .local_ip = f_beui32(fs->local_ip),
      .remote_ip = f_beui32(fs->remote_ip),
      .local_port = f_beui16(fs->local_port),
      .remote_port = f_beui16(fs->remote_port),
  /* ... */
  trace_event(FLEXNIC_PL_TREV_ATX, sizeof(te_atx), &te_atx);
#endif

  /* ignore bumps whose sequence number is older than the last one seen,
   * accounting for 16-bit wrap-around */
  if (/* ... */
          bump_seq - fs->bump_seq > (UINT16_MAX / 2)) ||
        (bump_seq < fs->bump_seq &&
         (fs->bump_seq < ((UINT16_MAX / 4) * 3) ||
          bump_seq > (UINT16_MAX / 4))))
  if ((fs->rx_base_sp & FLEXNIC_PL_FLOWST_TXFIN) == FLEXNIC_PL_FLOWST_TXFIN &&
      /* ... */)
  {
    fprintf(stderr, "fast_flows_bump: tx bump while TX is already closed\n");

  } else if ((flags & FLEXTCP_PL_ATX_FLTXDONE) == FLEXTCP_PL_ATX_FLTXDONE &&
      !(fs->rx_base_sp & FLEXNIC_PL_FLOWST_TXFIN) &&
      /* ... */)
  {
    fprintf(stderr, "fast_flows_bump: tx eos without dummy byte\n");
  }

  /* ... */
    fprintf(stderr, "fast_flows_bump: tx bump too large\n");
  /* ... */
    fprintf(stderr, "fast_flows_bump: rx bump too large\n");
  old_avail = tcp_txavail(fs, NULL);
  new_avail = tcp_txavail(fs, &tx_avail);

  if ((flags & FLEXTCP_PL_ATX_FLTXDONE) == FLEXTCP_PL_ATX_FLTXDONE &&
      /* ... */)

  if (old_avail < new_avail) {
    if (qman_set(&ctx->qman, flow_id, fs->tx_rate, new_avail - old_avail,
          TCP_MSS, QMAN_SET_RATE | QMAN_SET_MAXCHUNK | QMAN_ADD_AVAIL) != 0)
    {
      fprintf(stderr, "fast_flows_bump: qman_set 1 failed, UNEXPECTED\n");
    }
  }

  /* receive window went from closed to open, with no pending transmit data
   * that would advertise the new window */
  if (new_avail == 0 && rx_avail_prev == 0 && fs->rx_avail != 0) {
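/* Retransmission path (traced as FLEXNIC_PL_TREV_REXMIT): the flow's transmit
 * state is rolled back with flow_reset_retransmit(), and whatever payload
 * becomes sendable again as a result is re-registered with the queue manager. */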
  uint32_t old_avail, new_avail = -1;

#ifdef FLEXNIC_TRACING
  /* ... */
  trace_event(FLEXNIC_PL_TREV_REXMIT, sizeof(te_rexmit), &te_rexmit);
#endif

  old_avail = tcp_txavail(fs, NULL);

  flow_reset_retransmit(fs);
  new_avail = tcp_txavail(fs, NULL);

  if (new_avail > old_avail) {
    if (qman_set(&ctx->qman, flow_id, fs->tx_rate, new_avail - old_avail,
          TCP_MSS, QMAN_SET_RATE | QMAN_SET_MAXCHUNK | QMAN_ADD_AVAIL) != 0)
    {
      fprintf(stderr, "fast_flows_retransmit: qman_set failed, UNEXPECTED\n");
    }
static void flow_tx_read(struct flextcp_pl_flowst *fs, uint32_t pos,
    uint16_t len, void *dst)
{
  uint32_t part;

  if (LIKELY(pos + len <= fs->tx_len)) {
    dma_read(fs->tx_base + pos, len, dst);
  } else {
    part = fs->tx_len - pos;
    dma_read(fs->tx_base + pos, part, dst);
    dma_read(fs->tx_base, len - part, (uint8_t *) dst + part);
  }
}
static void flow_rx_write(struct flextcp_pl_flowst *fs, uint32_t pos,
    uint16_t len, const void *src)
{
  uint32_t part;
  uint64_t rx_base = fs->rx_base_sp & FLEXNIC_PL_FLOWST_RX_MASK;

  if (LIKELY(pos + len <= fs->rx_len)) {
    dma_write(rx_base + pos, len, src);
  } else {
    part = fs->rx_len - pos;
    dma_write(rx_base + pos, part, src);
    dma_write(rx_base, len - part, (const uint8_t *) src + part);
  }
}
#ifdef FLEXNIC_PL_OOO_RECV
static void flow_rx_seq_write(struct flextcp_pl_flowst *fs, uint32_t seq,
    uint16_t len, const void *src)
{
  /* pos: receive-buffer position corresponding to seq, derived from the
   * flow's current rx_next_seq/rx_next_pos mapping */
  /* ... */
  assert(pos < fs->rx_len);
  flow_rx_write(fs, pos, len, src);
}
#endif
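/* flow_tx_segment: builds and transmits one data segment. The Ethernet, IPv4
 * and TCP headers are written from scratch (fixed 20-byte IP header, ECT(0)
 * marking when ECN is enabled for the flow, PSH|ACK plus FIN if requested,
 * the receive window clamped to 16 bits), a timestamp option carrying
 * ts_my/ts_echo is appended, the payload is copied out of the circular
 * transmit buffer with flow_tx_read(), checksums are filled in and the frame
 * is handed to tx_send(). */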
static void flow_tx_segment(struct dataplane_context *ctx,
    struct network_buf_handle *nbh, struct flextcp_pl_flowst *fs,
    uint32_t seq, uint32_t ack, uint32_t rxwnd, uint16_t payload,
    uint32_t payload_pos, uint32_t ts_echo, uint32_t ts_my, uint8_t fin)
{
  uint16_t hdrs_len, optlen, fin_fl;
  struct pkt_tcp *p = network_buf_buf(nbh);
  struct tcp_timestamp_opt *opt_ts = (struct tcp_timestamp_opt *) (p + 1);

  /* calculate header length depending on options */
  optlen = (sizeof(*opt_ts) + 3) & ~3;
  hdrs_len = sizeof(*p) + optlen;

  /* fill ethernet header */
  memcpy(&p->eth.src, &eth_addr, ETH_ADDR_LEN);
  p->eth.type = t_beui16(ETH_TYPE_IP);

  /* fill ipv4 header */
  IPH_VHL_SET(&p->ip, 4, 5);
  p->ip.len = t_beui16(hdrs_len - offsetof(struct pkt_tcp, ip) + payload);
  p->ip.id = t_beui16(3);
  p->ip.offset = t_beui16(0);
  p->ip.proto = IP_PROTO_TCP;
  p->ip.src = fs->local_ip;
  p->ip.dest = fs->remote_ip;

  /* mark as ECN-capable if ECN is enabled for this flow */
  if ((fs->rx_base_sp & FLEXNIC_PL_FLOWST_ECN) == FLEXNIC_PL_FLOWST_ECN) {
    IPH_ECN_SET(&p->ip, IP_ECN_ECT0);
  }

  /* fill tcp header */
  fin_fl = (fin ? TCP_FIN : 0);
  p->tcp.src = fs->local_port;
  p->tcp.dest = fs->remote_port;
  p->tcp.seqno = t_beui32(seq);
  p->tcp.ackno = t_beui32(ack);
  TCPH_HDRLEN_FLAGS_SET(&p->tcp, 5 + optlen / 4, TCP_PSH | TCP_ACK | fin_fl);
  p->tcp.wnd = t_beui16(MIN(0xFFFF, rxwnd));
  p->tcp.urgp = t_beui16(0);

  /* fill in timestamp option */
  memset(p + 1, 0, optlen);
  opt_ts->kind = TCP_OPT_TIMESTAMP;
  opt_ts->length = sizeof(*opt_ts);
  opt_ts->ts_val = t_beui32(ts_my);
  opt_ts->ts_ecr = t_beui32(ts_echo);

  /* add payload from the circular transmit buffer */
  flow_tx_read(fs, payload_pos, payload, (uint8_t *) p + hdrs_len);

  /* checksums */
  tcp_checksums(nbh, p, fs->local_ip, fs->remote_ip,
      hdrs_len - offsetof(struct pkt_tcp, tcp) + payload);

#ifdef FLEXNIC_TRACING
  /* ... */
      .local_ip = f_beui32(p->ip.src),
      .remote_ip = f_beui32(p->ip.dest),
      .local_port = f_beui16(p->tcp.src),
      .remote_port = f_beui16(p->tcp.dest),
      .flow_flags = TCPH_FLAGS(&p->tcp),
  /* ... */
  trace_event(FLEXNIC_PL_TREV_TXSEG, sizeof(te_txseg), &te_txseg);
#endif

  tx_send(ctx, nbh, 0, hdrs_len + payload);
}
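/* ACK generation (presumably flow_tx_ack): instead of building headers from
 * scratch, the received packet is rewritten in place into a reply: MAC and IP
 * addresses and TCP ports are swapped, sequence/ack numbers and the
 * advertised window are filled in, the peer's timestamp is echoed through the
 * existing timestamp option, and an ecn_flags bit is added to the TCP flags
 * when the incoming packet carried an IP-level CE mark. */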
    uint32_t ack, uint32_t rxwnd, uint32_t echots, uint32_t myts,
    struct network_buf_handle *nbh, struct tcp_opts *opts)
{
  uint16_t ecn_flags = 0;

  p = network_buf_bufoff(nbh);

  fprintf(stderr, "FLOW local=%08x:%05u remote=%08x:%05u ACK: seq=%u ack=%u\n",
      f_beui32(p->ip.dest), f_beui16(p->tcp.dest),
      f_beui32(p->ip.src), f_beui16(p->tcp.src), seq, ack);

  /* turn the received packet around: swap MAC addresses, IP addresses and
   * TCP ports */
  /* ... */
  p->eth.src = p->eth.dest;
  /* ... */
  p->ip.src = p->ip.dest;
  /* ... */
  p->tcp.src = p->tcp.dest;
  /* ... */

  hdrlen = sizeof(*p) + (TCPH_HDRLEN(&p->tcp) - 5) * 4;

  /* raise congestion feedback in ecn_flags if the packet carried a CE mark,
   * then clear the ECN field for the outgoing packet */
  if (IPH_ECN(&p->ip) == IP_ECN_CE) {
    /* ... */
  }
  IPH_ECN_SET(&p->ip, IP_ECN_NONE);

  p->tcp.seqno = t_beui32(seq);
  p->tcp.ackno = t_beui32(ack);
  TCPH_HDRLEN_FLAGS_SET(&p->tcp, TCPH_HDRLEN(&p->tcp), TCP_ACK | ecn_flags);
  p->tcp.wnd = t_beui16(MIN(0xFFFF, rxwnd));
  p->tcp.urgp = t_beui16(0);

  /* echo the peer's timestamp through the existing timestamp option */
  ts_opt->ts_val = t_beui32(myts);
  ts_opt->ts_ecr = t_beui32(echots);

  p->ip.len = t_beui16(hdrlen - offsetof(struct pkt_tcp, ip));

  tcp_checksums(nbh, p, p->ip.src, p->ip.dest,
      hdrlen - offsetof(struct pkt_tcp, tcp));

#ifdef FLEXNIC_TRACING
  /* ... */
      .local_ip = f_beui32(p->ip.src),
      .remote_ip = f_beui32(p->ip.dest),
      .local_port = f_beui16(p->tcp.src),
      .remote_port = f_beui16(p->tcp.dest),
      .flow_flags = TCPH_FLAGS(&p->tcp),
  /* ... */
  trace_event(FLEXNIC_PL_TREV_TXACK, sizeof(te_txack), &te_txack);
#endif

  tx_send(ctx, nbh, network_buf_off(nbh), hdrlen);
}
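/* tcp_checksums: fills in the IP and TCP checksums of an outgoing packet,
 * either by arming the NIC's transmit checksum offload (tx_xsum_enable, which
 * needs the pseudo-header inputs ip_s/ip_d/l3_paylen) or, in the software
 * fallback, by computing them directly with rte_ipv4_cksum and
 * rte_ipv4_udptcp_cksum. */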
static inline void tcp_checksums(struct network_buf_handle *nbh,
    struct pkt_tcp *p, beui32_t ip_s, beui32_t ip_d, uint16_t l3_paylen)
{
  /* ... */
  /* checksum offload: the NIC fills in the checksums on transmit */
  p->tcp.chksum = tx_xsum_enable(nbh, &p->ip, ip_s, ip_d, l3_paylen);
  /* ... */
  /* software fallback */
  p->ip.chksum = rte_ipv4_cksum((void *) &p->ip);
  p->tcp.chksum = rte_ipv4_udptcp_cksum((void *) &p->ip, (void *) &p->tcp);
  /* ... */
}
void fast_flows_kernelxsums(struct network_buf_handle *nbh,
    struct pkt_tcp *p)
{
  tcp_checksums(nbh, p, p->ip.src, p->ip.dest,
      f_beui16(p->ip.len) - sizeof(p->ip));
}
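/* flow_hash: hashes the connection 4-tuple with two chained CRC32c steps: the
 * two 32-bit IP addresses are packed into a 64-bit word and hashed first, and
 * the result seeds a second CRC over the two 16-bit ports packed into a
 * 32-bit word. */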
static inline uint32_t flow_hash(struct flow_key *k)
{
  return crc32c_sse42_u32(k->local_port.x | (((uint32_t) k->remote_port.x) << 16),
      crc32c_sse42_u64(k->local_ip.x | (((uint64_t) k->remote_ip.x) << 32), 0));
}
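/* Flow-state lookup for a batch of received packets, split into three passes
 * so memory accesses overlap across packets: (1) build the flow key from each
 * packet's addresses and ports, hash it and prefetch the candidate hash-table
 * buckets, (2) walk up to FLEXNIC_PL_FLOWHT_NBSZ neighboring buckets, skip
 * invalid entries or hash mismatches and prefetch the flow state a matching
 * entry points to, (3) re-walk the buckets and accept an entry only if the
 * stored flow's 4-tuple matches the packet exactly, storing the flow-state
 * pointer in fss[i]. */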
    struct network_buf_handle **nbhs, void **fss, uint16_t n)
{
  uint32_t h, k, j, eh, fid, ffid;

  /* pass 1: hash and prefetch hash-table buckets */
  for (i = 0; i < n; i++) {
    p = network_buf_bufoff(nbhs[i]);

    key.local_ip = p->ip.dest;
    key.remote_ip = p->ip.src;
    key.local_port = p->tcp.dest;
    key.remote_port = p->tcp.src;
    h = flow_hash(&key);

    rte_prefetch0(&fp_state->flowht[h % FLEXNIC_PL_FLOWHT_ENTRIES]);
    rte_prefetch0(&fp_state->flowht[(h + 3) % FLEXNIC_PL_FLOWHT_ENTRIES]);
  }

  /* pass 2: probe buckets and prefetch candidate flow state */
  for (i = 0; i < n; i++) {
    /* ... */
    for (j = 0; j < FLEXNIC_PL_FLOWHT_NBSZ; j++) {
      k = (h + j) % FLEXNIC_PL_FLOWHT_ENTRIES;
      e = &fp_state->flowht[k];
      /* ... */
      fid = ffid & ((1 << FLEXNIC_PL_FLOWHTE_POSSHIFT) - 1);
      if ((ffid & FLEXNIC_PL_FLOWHTE_VALID) == 0 || eh != h) {
        continue;
      }

      rte_prefetch0(&fp_state->flowst[fid]);
    }
  }

  /* pass 3: verify the 4-tuple and resolve the flow state */
  for (i = 0; i < n; i++) {
    p = network_buf_bufoff(nbhs[i]);
    /* ... */
    for (j = 0; j < FLEXNIC_PL_FLOWHT_NBSZ; j++) {
      k = (h + j) % FLEXNIC_PL_FLOWHT_ENTRIES;
      e = &fp_state->flowht[k];
      /* ... */
      fid = ffid & ((1 << FLEXNIC_PL_FLOWHTE_POSSHIFT) - 1);
      if ((ffid & FLEXNIC_PL_FLOWHTE_VALID) == 0 || eh != h) {
        continue;
      }

      fs = &fp_state->flowst[fid];
      if ((fs->local_ip.x == p->ip.dest.x) &
          (fs->remote_ip.x == p->ip.src.x) &
          (fs->local_port.x == p->tcp.dest.x) &
          (fs->remote_port.x == p->tcp.src.x))
      {
        rte_prefetch0((uint8_t *) fs + 64);
        fss[i] = &fp_state->flowst[fid];