TAS
TCP Acceleration as an OS Service
nicif.c
/*
 * Copyright 2019 University of Washington, Max Planck Institute for
 * Software Systems, and The University of Texas at Austin
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>   /* assert() is used below */

#include <tas.h>
#include <tas_memif.h>
#include <packet_defs.h>
#include <utils.h>
#include <utils_timeout.h>
#include <utils_sync.h>
#include "internal.h"

#include <rte_config.h>
#include <rte_hash_crc.h>

#define PKTBUF_SIZE 1536

struct nic_buffer {
  uint64_t addr;
  void *buf;
};

struct flow_id_item {
  uint32_t flow_id;
  struct flow_id_item *next;
};
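
/* Free flow ids are kept on an intrusive singly-linked free list threaded
 * through the statically sized flow_id_items array; see flow_id_alloc_init(),
 * flow_id_alloc(), and flow_id_free() at the bottom of this file. */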

static int adminq_init(void);
static int adminq_init_core(uint16_t core);
static inline int rxq_poll(void);
static inline void process_packet(const void *buf, uint16_t len,
    uint32_t fn_core, uint16_t flow_group);
static inline volatile struct flextcp_pl_ktx *ktx_try_alloc(uint32_t core,
    struct nic_buffer **buf, uint32_t *new_tail);
static inline uint32_t flow_hash(ip_addr_t lip, beui16_t lp,
    ip_addr_t rip, beui16_t rp);
static inline int flow_slot_alloc(uint32_t h, uint32_t *i, uint32_t *d);
static inline int flow_slot_clear(uint32_t f_id, ip_addr_t lip, beui16_t lp,
    ip_addr_t rip, beui16_t rp);
static void flow_id_alloc_init(void);
static int flow_id_alloc(uint32_t *fid);
static void flow_id_free(uint32_t flow_id);

struct flow_id_item flow_id_items[FLEXNIC_PL_FLOWST_NUM];
struct flow_id_item *flow_id_freelist;

static uint32_t fn_cores;

static struct nic_buffer **rxq_bufs;
static volatile struct flextcp_pl_krx **rxq_base;
static uint32_t rxq_len;
static uint32_t *rxq_tail;
static uint32_t rxq_next;

static struct nic_buffer **txq_bufs;
static volatile struct flextcp_pl_ktx **txq_base;
static uint32_t txq_len;
static uint32_t *txq_tail;

int nicif_init(void)
{
  rte_hash_crc_init_alg();

  /* wait for fastpath to be ready */
  while (!(tas_info->flags & FLEXNIC_FLAG_READY));

  fn_cores = tas_info->cores_num;

  /* prepare packet memory manager */
  if (packetmem_init()) {
    fprintf(stderr, "nicif_init: packetmem_init failed\n");
    return -1;
  }

  /* prepare flow_id allocator */
  flow_id_alloc_init();

  if (adminq_init()) {
    fprintf(stderr, "nicif_init: initializing admin queue failed\n");
    return -1;
  }

  return 0;
}

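/* Poll the kernel rx queues for up to 512 entries per call. Each rxq_poll()
 * invocation checks a single descriptor on one core's queue (round-robin
 * across fastpath cores); the return value is the number of entries that
 * were actually processed. */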
unsigned nicif_poll(void)
{
  unsigned i, ret = 0;
  int x;

  for (i = 0; i < 512; i++) {
    x = rxq_poll();
    ret += (x == -1 ? 0 : 1);
  }

  return ret;
}

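/* Register a new application context with the fastpath. The per-core context
 * fields are written first; after a barrier the queue lengths are set, and
 * after further barriers the context id is recorded and ctx_num is bumped,
 * so the fastpath never observes a partially initialized context. */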
int nicif_appctx_add(uint16_t appid, uint32_t db, uint64_t *rxq_base,
    uint32_t rxq_len, uint64_t *txq_base, uint32_t txq_len, int evfd)
{
  struct flextcp_pl_appctx *actx;
  struct flextcp_pl_appst *ast = &fp_state->appst[appid];
  uint16_t i;

  if (appid >= FLEXNIC_PL_APPST_NUM) {
    fprintf(stderr, "nicif_appctx_add: app id too high (%u, max=%u)\n", appid,
        FLEXNIC_PL_APPST_NUM);
    return -1;
  }

  if (ast->ctx_num + 1 >= FLEXNIC_PL_APPST_CTX_NUM) {
    fprintf(stderr, "nicif_appctx_add: too many contexts in app\n");
    return -1;
  }

  for (i = 0; i < tas_info->cores_num; i++) {
    actx = &fp_state->appctx[i][db];
    actx->appst_id = appid;
    actx->rx_base = rxq_base[i];
    actx->tx_base = txq_base[i];
    actx->rx_avail = rxq_len;
    actx->evfd = evfd;
  }

  MEM_BARRIER();

  for (i = 0; i < tas_info->cores_num; i++) {
    actx = &fp_state->appctx[i][db];
    actx->tx_len = txq_len;
    actx->rx_len = rxq_len;
  }

  MEM_BARRIER();
  ast->ctx_ids[ast->ctx_num] = db;
  MEM_BARRIER();
  ast->ctx_num++;

  return 0;
}

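/* Install flow state for a new connection: allocate a flow id, find a slot
 * in the flow hash table, initialize the flow state entry, and finally
 * publish the hash table entry. The hash is written before the flow id word
 * that carries the VALID bit, so a concurrent lookup never matches a valid
 * entry against a stale hash. */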
int nicif_connection_add(uint32_t db, uint64_t mac_remote, uint32_t ip_local,
    uint16_t port_local, uint32_t ip_remote, uint16_t port_remote,
    uint64_t rx_base, uint32_t rx_len, uint64_t tx_base, uint32_t tx_len,
    uint32_t remote_seq, uint32_t local_seq, uint64_t app_opaque,
    uint32_t flags, uint32_t rate, uint32_t fn_core, uint16_t flow_group,
    uint32_t *pf_id)
{
  struct flextcp_pl_flowst *fs;
  beui32_t lip = t_beui32(ip_local), rip = t_beui32(ip_remote);
  beui16_t lp = t_beui16(port_local), rp = t_beui16(port_remote);
  uint32_t i, d, f_id, hash;
  struct flextcp_pl_flowhte *hte = fp_state->flowht;

  /* allocate flow id */
  if (flow_id_alloc(&f_id) != 0) {
    fprintf(stderr, "nicif_connection_add: allocating flow state failed\n");
    return -1;
  }

  /* calculate hash and find empty slot */
  hash = flow_hash(lip, lp, rip, rp);
  if (flow_slot_alloc(hash, &i, &d) != 0) {
    flow_id_free(f_id);
    fprintf(stderr, "nicif_connection_add: allocating slot failed\n");
    return -1;
  }
  assert(i < FLEXNIC_PL_FLOWHT_ENTRIES);
  assert(d < FLEXNIC_PL_FLOWHT_NBSZ);

  if ((flags & NICIF_CONN_ECN) == NICIF_CONN_ECN) {
    rx_base |= FLEXNIC_PL_FLOWST_ECN;
  }

  fs = &fp_state->flowst[f_id];
  fs->opaque = app_opaque;
  fs->rx_base_sp = rx_base;
  fs->tx_base = tx_base;
  fs->rx_len = rx_len;
  fs->tx_len = tx_len;
  memcpy(&fs->remote_mac, &mac_remote, ETH_ADDR_LEN);
  fs->db_id = db;

  fs->local_ip = lip;
  fs->remote_ip = rip;
  fs->local_port = lp;
  fs->remote_port = rp;

  fs->flow_group = flow_group;
  fs->lock = 0;
  fs->bump_seq = 0;

  fs->rx_avail = rx_len;
  fs->rx_next_pos = 0;
  fs->rx_next_seq = remote_seq;
  fs->rx_remote_avail = rx_len; /* XXX */

  fs->tx_sent = 0;
  fs->tx_next_pos = 0;
  fs->tx_next_seq = local_seq;
  fs->tx_avail = 0;
  fs->tx_next_ts = 0;
  fs->tx_rate = rate;
  fs->rtt_est = 0;

  /* write to empty entry first */
  MEM_BARRIER();
  hte[i].flow_hash = hash;
  MEM_BARRIER();
  hte[i].flow_id = FLEXNIC_PL_FLOWHTE_VALID |
      (d << FLEXNIC_PL_FLOWHTE_POSSHIFT) | f_id;

  *pf_id = f_id;
  return 0;
}

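/* Take a connection away from the fastpath: mark the flow as slowpath-only,
 * snapshot the tx/rx sequence numbers and FIN state under the flow lock, and
 * remove the entry from the flow hash table so no further packets are
 * steered to it. */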
int nicif_connection_disable(uint32_t f_id, uint32_t *tx_seq, uint32_t *rx_seq,
    int *tx_closed, int *rx_closed)
{
  struct flextcp_pl_flowst *fs = &fp_state->flowst[f_id];

  util_spin_lock(&fs->lock);

  *tx_seq = fs->tx_next_seq;
  *rx_seq = fs->rx_next_seq;
  fs->rx_base_sp |= FLEXNIC_PL_FLOWST_SLOWPATH;

  *rx_closed = !!(fs->rx_base_sp & FLEXNIC_PL_FLOWST_RXFIN);
  *tx_closed = !!(fs->rx_base_sp & FLEXNIC_PL_FLOWST_TXFIN) &&
      fs->tx_sent == 0;

  util_spin_unlock(&fs->lock);

  flow_slot_clear(f_id, fs->local_ip, fs->local_port, fs->remote_ip,
      fs->remote_port);
  return 0;
}

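/* Release the flow id of a connection. The caller is expected to have
 * disabled the connection first (nicif_connection_disable), so the hash
 * table no longer references this id when it is recycled. */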
void nicif_connection_free(uint32_t f_id)
{
  flow_id_free(f_id);
}

int nicif_connection_move(uint32_t dst_db, uint32_t f_id)
{
  fp_state->flowst[f_id].db_id = dst_db;
  return 0;
}

int nicif_connection_stats(uint32_t f_id,
    struct nicif_connection_stats *p_stats)
{
  struct flextcp_pl_flowst *fs;

  if (f_id >= FLEXNIC_PL_FLOWST_NUM) {
    fprintf(stderr, "nicif_connection_stats: bad flow id\n");
    return -1;
  }

  fs = &fp_state->flowst[f_id];
  p_stats->c_drops = fs->cnt_tx_drops;
  p_stats->c_acks = fs->cnt_rx_acks;
  p_stats->c_ackb = fs->cnt_rx_ack_bytes;
  p_stats->c_ecnb = fs->cnt_rx_ecn_bytes;
  p_stats->txp = fs->tx_sent != 0;
  p_stats->rtt = fs->rtt_est;

  return 0;
}

int nicif_connection_setrate(uint32_t f_id, uint32_t rate)
{
  struct flextcp_pl_flowst *fs;

  if (f_id >= FLEXNIC_PL_FLOWST_NUM) {
    fprintf(stderr, "nicif_connection_setrate: bad flow id\n");
    return -1;
  }

  fs = &fp_state->flowst[f_id];
  fs->tx_rate = rate;

  return 0;
}

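/* Kernel-to-fastpath tx entries follow a common publication pattern: fill in
 * the message payload, issue a memory barrier, then write the type field
 * last. A non-zero type hands ownership of the descriptor to the fastpath
 * core, which is then woken via notify_fastpath_core(). */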
int nicif_connection_retransmit(uint32_t f_id, uint16_t flow_group)
{
  volatile struct flextcp_pl_ktx *ktx;
  struct nic_buffer *buf;
  uint32_t tail;
  uint16_t core = fp_state->flow_group_steering[flow_group];

  if ((ktx = ktx_try_alloc(core, &buf, &tail)) == NULL) {
    return -1;
  }
  txq_tail[core] = tail;

  ktx->msg.connretran.flow_id = f_id;
  MEM_BARRIER();
  ktx->type = FLEXTCP_PL_KTX_CONNRETRAN;

  notify_fastpath_core(core);

  return 0;
}

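/* Sending a raw packet from the slowpath is a two-step operation: allocate a
 * tx descriptor plus buffer, fill it, then publish it. A minimal sketch
 * (error handling and the actual payload are left out):
 *
 *   void *buf;
 *   uint32_t opaque;
 *   if (nicif_tx_alloc(len, &buf, &opaque) == 0) {
 *     memcpy(buf, payload, len);   // fill the packet buffer
 *     nicif_tx_send(opaque, 0);    // publish entry, wake fastpath core 0
 *   }
 *
 * nicif_tx_alloc() returns the queue position *after* the allocated entry in
 * opaque; nicif_tx_send() steps back by one (with wrap-around) to find the
 * entry it needs to mark as ready. */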
int nicif_tx_alloc(uint16_t len, void **pbuf, uint32_t *opaque)
{
  volatile struct flextcp_pl_ktx *ktx;
  struct nic_buffer *buf;

  if ((ktx = ktx_try_alloc(0, &buf, opaque)) == NULL) {
    return -1;
  }

  ktx->msg.packet.addr = buf->addr;
  ktx->msg.packet.len = len;
  *pbuf = buf->buf;
  return 0;
}

void nicif_tx_send(uint32_t opaque, int no_ts)
{
  uint32_t tail = (opaque == 0 ? txq_len - 1 : opaque - 1);
  volatile struct flextcp_pl_ktx *ktx = &txq_base[0][tail];

  MEM_BARRIER();
  ktx->type = (!no_ts ? FLEXTCP_PL_KTX_PACKET : FLEXTCP_PL_KTX_PACKET_NOTS);
  txq_tail[0] = opaque;

  notify_fastpath_core(0);
}

static int adminq_init(void)
{
  uint32_t i;

  rxq_len = config.nic_rx_len;
  txq_len = config.nic_tx_len;

  rxq_bufs = calloc(fn_cores, sizeof(*rxq_bufs));
  rxq_base = calloc(fn_cores, sizeof(*rxq_base));
  rxq_tail = calloc(fn_cores, sizeof(*rxq_tail));
  txq_bufs = calloc(fn_cores, sizeof(*txq_bufs));
  txq_base = calloc(fn_cores, sizeof(*txq_base));
  txq_tail = calloc(fn_cores, sizeof(*txq_tail));
  if (rxq_bufs == NULL || rxq_base == NULL || rxq_tail == NULL ||
      txq_bufs == NULL || txq_base == NULL || txq_tail == NULL)
  {
    fprintf(stderr, "adminq_init: queue state alloc failed\n");
    return -1;
  }

  rxq_next = 0;

  for (i = 0; i < fn_cores; i++) {
    if (adminq_init_core(i) != 0)
      return -1;
  }

  return 0;
}

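/* Per-core queue setup: allocate buffer bookkeeping arrays, carve the packet
 * buffers and the krx/ktx descriptor rings out of the shared packet memory
 * region, point the rx descriptors at their buffers, and finally publish the
 * queue bases and lengths to the fastpath through fp_state->kctx (bases
 * first, barrier, then lengths). */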
static int adminq_init_core(uint16_t core)
{
  struct packetmem_handle *pm_bufs, *pm_rx, *pm_tx;
  uintptr_t off_bufs, off_rx, off_tx;
  size_t i, sz_bufs, sz_rx, sz_tx;

  if ((rxq_bufs[core] = calloc(config.nic_rx_len, sizeof(**rxq_bufs)))
      == NULL)
  {
    fprintf(stderr, "adminq_init: calloc rx bufs failed\n");
    return -1;
  }
  if ((txq_bufs[core] = calloc(config.nic_tx_len, sizeof(**txq_bufs)))
      == NULL)
  {
    fprintf(stderr, "adminq_init: calloc tx bufs failed\n");
    free(rxq_bufs[core]);
    return -1;
  }

  sz_bufs = ((config.nic_rx_len + config.nic_tx_len) * PKTBUF_SIZE + 0xfff)
      & ~0xfffULL;
  if (packetmem_alloc(sz_bufs, &off_bufs, &pm_bufs) != 0) {
    fprintf(stderr, "adminq_init: packetmem_alloc bufs failed\n");
    free(txq_bufs[core]);
    free(rxq_bufs[core]);
    return -1;
  }

  sz_rx = config.nic_rx_len * sizeof(struct flextcp_pl_krx);
  if (packetmem_alloc(sz_rx, &off_rx, &pm_rx) != 0) {
    fprintf(stderr, "adminq_init: packetmem_alloc rx failed\n");
    packetmem_free(pm_bufs);
    free(txq_bufs[core]);
    free(rxq_bufs[core]);
    return -1;
  }
  sz_tx = config.nic_tx_len * sizeof(struct flextcp_pl_ktx);
  if (packetmem_alloc(sz_tx, &off_tx, &pm_tx) != 0) {
    fprintf(stderr, "adminq_init: packetmem_alloc tx failed\n");
    packetmem_free(pm_rx);
    packetmem_free(pm_bufs);
    free(txq_bufs[core]);
    free(rxq_bufs[core]);
    return -1;
  }

  rxq_base[core] = (volatile struct flextcp_pl_krx *)
      ((uint8_t *) tas_shm + off_rx);
  txq_base[core] = (volatile struct flextcp_pl_ktx *)
      ((uint8_t *) tas_shm + off_tx);

  memset((void *) rxq_base[core], 0, sz_rx);
  memset((void *) txq_base[core], 0, sz_tx);

  for (i = 0; i < rxq_len; i++) {
    rxq_bufs[core][i].addr = off_bufs;
    rxq_bufs[core][i].buf = (uint8_t *) tas_shm + off_bufs;
    rxq_base[core][i].addr = off_bufs;
    off_bufs += PKTBUF_SIZE;
  }
  for (i = 0; i < txq_len; i++) {
    txq_bufs[core][i].addr = off_bufs;
    txq_bufs[core][i].buf = (uint8_t *) tas_shm + off_bufs;
    off_bufs += PKTBUF_SIZE;
  }

  fp_state->kctx[core].rx_base = off_rx;
  fp_state->kctx[core].tx_base = off_tx;
  MEM_BARRIER();
  fp_state->kctx[core].tx_len = sz_tx;
  fp_state->kctx[core].rx_len = sz_rx;
  return 0;
}

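/* Check a single descriptor on one core's kernel rx queue, advancing the
 * round-robin cursor rxq_next on every call. Returns -1 if the descriptor is
 * empty, 0 once an entry has been consumed and handed back to the fastpath
 * (by resetting its type field to 0). */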
static inline int rxq_poll(void)
{
  uint32_t old_tail, tail, core;
  volatile struct flextcp_pl_krx *krx;
  struct nic_buffer *buf;
  uint8_t type;

  core = rxq_next;
  old_tail = tail = rxq_tail[core];
  krx = &rxq_base[core][tail];
  buf = &rxq_bufs[core][tail];
  rxq_next = (core + 1) % fn_cores;

  /* no queue entry here */
  type = krx->type;
  if (type == FLEXTCP_PL_KRX_INVALID) {
    return -1;
  }

  /* update tail */
  tail = tail + 1;
  if (tail == rxq_len) {
    tail -= rxq_len;
  }

  /* handle based on queue entry type */
  switch (type) {
    case FLEXTCP_PL_KRX_PACKET:
      process_packet(buf->buf, krx->msg.packet.len, krx->msg.packet.fn_core,
          krx->msg.packet.flow_group);
      break;

    default:
      fprintf(stderr, "rxq_poll: unknown rx type 0x%x old %x len %x\n", type,
          old_tail, rxq_len);
  }

  /* hand the descriptor back to the fastpath */
  krx->type = 0;
  rxq_tail[core] = tail;

  return 0;
}

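/* Dispatch a received packet: ARP frames go to arp_packet(), TCP segments to
 * tcp_packet() (whose non-zero return requests that the packet also be
 * passed on), and anything left unhandled is forwarded to the KNI interface
 * via kni_packet(). */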
static inline void process_packet(const void *buf, uint16_t len,
    uint32_t fn_core, uint16_t flow_group)
{
  const struct eth_hdr *eth = buf;
  const struct ip_hdr *ip = (const struct ip_hdr *) (eth + 1);
  const struct tcp_hdr *tcp = (const struct tcp_hdr *) (ip + 1);
  int to_kni = 1;

  if (f_beui16(eth->type) == ETH_TYPE_ARP) {
    if (len < sizeof(struct pkt_arp)) {
      fprintf(stderr, "process_packet: short arp packet\n");
      return;
    }

    arp_packet(buf, len);
  } else if (f_beui16(eth->type) == ETH_TYPE_IP) {
    if (len < sizeof(*eth) + sizeof(*ip)) {
      fprintf(stderr, "process_packet: short ip packet\n");
      return;
    }

    if (ip->proto == IP_PROTO_TCP) {
      if (len < sizeof(*eth) + sizeof(*ip) + sizeof(*tcp)) {
        fprintf(stderr, "process_packet: short tcp packet\n");
        return;
      }

      to_kni = !!tcp_packet(buf, len, fn_core, flow_group);
    }
  }

  if (to_kni)
    kni_packet(buf, len);
}

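/* Try to allocate the next tx descriptor on a core's kernel tx queue. A type
 * field of 0 marks a free descriptor; a non-zero type means the ring is
 * full. The advanced tail is returned through new_tail but not committed, so
 * the caller decides when to store it back into txq_tail. */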
static inline volatile struct flextcp_pl_ktx *ktx_try_alloc(uint32_t core,
    struct nic_buffer **pbuf, uint32_t *new_tail)
{
  uint32_t tail = txq_tail[core];
  volatile struct flextcp_pl_ktx *ktx = &txq_base[core][tail];
  struct nic_buffer *buf = &txq_bufs[core][tail];

  /* queue is full */
  if (ktx->type != 0) {
    return NULL;
  }

  /* update tail (wrap around the tx queue length, not the rx queue's) */
  tail = tail + 1;
  if (tail == txq_len) {
    tail -= txq_len;
  }

  *pbuf = buf;
  *new_tail = tail;

  return ktx;
}

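/* Hash the connection 4-tuple with DPDK's CRC32 (rte_hash_crc, seed 0) over
 * a packed struct. The fastpath presumably computes the same hash for its
 * lookups, so the layout and seed must match exactly. */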
static inline uint32_t flow_hash(ip_addr_t lip, beui16_t lp,
    ip_addr_t rip, beui16_t rp)
{
  struct {
    ip_addr_t lip;
    ip_addr_t rip;
    beui16_t lp;
    beui16_t rp;
  } __attribute__((packed)) hk =
      { .lip = lip, .rip = rip, .lp = lp, .rp = rp };
  MEM_BARRIER();
  return rte_hash_crc(&hk, sizeof(hk), 0);
}

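/* Find a hash table slot for a new entry, in the style of hopscotch hashing:
 * every entry must live within a neighborhood of FLEXNIC_PL_FLOWHT_NBSZ
 * slots starting at its hash bucket. If the neighborhood is full, an empty
 * slot further ahead is located and then bubbled backwards by relocating
 * entries that stay within their own neighborhoods, until it falls into
 * range. On success, *pi receives the slot index and *pd the displacement
 * from the hash bucket. */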
static inline int flow_slot_alloc(uint32_t h, uint32_t *pi, uint32_t *pd)
{
  uint32_t j, i, l, k, d;
  struct flextcp_pl_flowhte *hte = fp_state->flowht;

  /* find neighborhood for this hash */
  j = h % FLEXNIC_PL_FLOWHT_ENTRIES;
  l = (j + FLEXNIC_PL_FLOWHT_NBSZ) % FLEXNIC_PL_FLOWHT_ENTRIES;

  /* look for empty slot */
  d = 0;
  for (i = j; i != l; i = (i + 1) % FLEXNIC_PL_FLOWHT_ENTRIES) {
    if ((hte[i].flow_id & FLEXNIC_PL_FLOWHTE_VALID) == 0) {
      *pi = i;
      *pd = d;
      return 0;
    }
    d++;
  }

  /* no free slot in the neighborhood, try to free one up */
  k = (l + 4 * FLEXNIC_PL_FLOWHT_NBSZ) % FLEXNIC_PL_FLOWHT_ENTRIES;
  /* looking for candidate empty slot to move back */
  for (; i != k; i = (i + 1) % FLEXNIC_PL_FLOWHT_ENTRIES) {
    if ((hte[i].flow_id & FLEXNIC_PL_FLOWHTE_VALID) == 0) {
      break;
    }
  }

  /* abort if no candidate slot found */
  if (i == k) {
    fprintf(stderr, "flow_slot_alloc: no empty slot found\n");
    return -1;
  }

  /* move candidate backwards until in range for this insertion */
  /* j < l -> (i < j || i >= l) */
  /* j > l -> (i >= l && i < j) */
  while ((j > l || (i < j || i >= l)) && (j < l || (i >= l && i < j))) {
    k = i;

    /* look for element to swap (add ENTRIES before subtracting so the
     * unsigned arithmetic cannot underflow near the start of the table) */
    i = (k + FLEXNIC_PL_FLOWHT_ENTRIES - FLEXNIC_PL_FLOWHT_NBSZ) %
        FLEXNIC_PL_FLOWHT_ENTRIES;
    for (; i != k; i = (i + 1) % FLEXNIC_PL_FLOWHT_ENTRIES) {
      assert((hte[i].flow_id & FLEXNIC_PL_FLOWHTE_VALID) != 0);

      /* calculate how much further this element can be moved */
      d = (hte[i].flow_id >> FLEXNIC_PL_FLOWHTE_POSSHIFT) &
          (FLEXNIC_PL_FLOWHT_NBSZ - 1);
      d = FLEXNIC_PL_FLOWHT_NBSZ - 1 - d;

      /* check whether element can be moved */
      if ((k + FLEXNIC_PL_FLOWHT_ENTRIES - i) % FLEXNIC_PL_FLOWHT_ENTRIES
          <= d) {
        break;
      }
    }

    /* abort if none of the elements can be moved */
    if (i == k) {
      fprintf(stderr, "flow_slot_alloc: no element could be moved\n");
      return -1;
    }

    /* move element up */
    assert((hte[k].flow_id & FLEXNIC_PL_FLOWHTE_VALID) == 0);
    d = (hte[i].flow_id >> FLEXNIC_PL_FLOWHTE_POSSHIFT) &
        (FLEXNIC_PL_FLOWHT_NBSZ - 1);

    /* write to empty entry first */
    hte[k].flow_hash = hte[i].flow_hash;
    MEM_BARRIER();
    hte[k].flow_id = FLEXNIC_PL_FLOWHTE_VALID |
        (d << FLEXNIC_PL_FLOWHTE_POSSHIFT) |
        (((1 << FLEXNIC_PL_FLOWHTE_POSSHIFT) - 1) & hte[i].flow_id);
    MEM_BARRIER();

    /* empty original position */
    hte[i].flow_id = 0;
    MEM_BARRIER();
  }

  *pi = i;
  *pd = (i + FLEXNIC_PL_FLOWHT_ENTRIES - j) % FLEXNIC_PL_FLOWHT_ENTRIES;
  return 0;
}

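/* Remove a connection's entry from the flow hash table by scanning its
 * neighborhood for a valid entry with matching hash and flow id, then
 * clearing the VALID bit. */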
static inline int flow_slot_clear(uint32_t f_id, ip_addr_t lip, beui16_t lp,
    ip_addr_t rip, beui16_t rp)
{
  uint32_t h, k, j, ffid, eh;
  struct flextcp_pl_flowhte *e;

  h = flow_hash(lip, lp, rip, rp);

  for (j = 0; j < FLEXNIC_PL_FLOWHT_NBSZ; j++) {
    k = (h + j) % FLEXNIC_PL_FLOWHT_ENTRIES;
    e = &fp_state->flowht[k];

    ffid = e->flow_id;
    MEM_BARRIER();
    eh = e->flow_hash;

    if ((ffid & FLEXNIC_PL_FLOWHTE_VALID) == 0 || eh != h) {
      continue;
    }

    if ((ffid & ((1 << FLEXNIC_PL_FLOWHTE_POSSHIFT) - 1)) == f_id) {
      e->flow_id &= ~FLEXNIC_PL_FLOWHTE_VALID;
      return 0;
    }
  }

  fprintf(stderr, "flow_slot_clear: table entry not found\n");
  return -1;
}

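/* Flow id allocator: a LIFO free list over flow_id_items. Both alloc and
 * free are O(1). No locking is done here, which assumes all calls come from
 * the single slowpath thread. */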
static void flow_id_alloc_init(void)
{
  size_t i;
  struct flow_id_item *it, *prev = NULL;

  for (i = 0; i < FLEXNIC_PL_FLOWST_NUM; i++) {
    it = &flow_id_items[i];
    it->flow_id = i;
    it->next = NULL;

    if (prev == NULL) {
      flow_id_freelist = it;
    } else {
      prev->next = it;
    }
    prev = it;
  }
}

static int flow_id_alloc(uint32_t *fid)
{
  struct flow_id_item *it = flow_id_freelist;

  if (it == NULL)
    return -1;

  flow_id_freelist = it->next;
  *fid = it->flow_id;
  return 0;
}

static void flow_id_free(uint32_t flow_id)
{
  struct flow_id_item *it = &flow_id_items[flow_id];
  it->next = flow_id_freelist;
  flow_id_freelist = it;
}