PF_RING API
API documentation for PF_RING: high-speed packet capture, filtering and analysis framework.
pf_ring.h
Go to the documentation of this file.
1 /*
2  *
3  * Definitions for packet ring
4  *
5  * 2004-2018 - ntop.org
6  *
7  */
8 
9 #ifndef __RING_H
10 #define __RING_H
11 
19 #ifdef __KERNEL__
20 #include <linux/in6.h>
21 #else
22 #include <netinet/in.h>
23 #endif /* __KERNEL__ */
24 
25 #define RING_MAGIC
26 #define RING_MAGIC_VALUE 0x88
27 
28 /* Increment whenever we change slot or packet header layout (e.g. we add/move a field) */
29 #define RING_FLOWSLOT_VERSION 17
30 
31 #define DEFAULT_BUCKET_LEN 128
32 #define MAX_NUM_DEVICES 256
33 
34 #define MAX_NUM_RING_SOCKETS 256
35 
36 /* Watermark */
37 #define DEFAULT_MIN_PKT_QUEUED 128
38 #define DEFAULT_POLL_WATERMARK_TIMEOUT 0
39 
40 /* Dirty hack I know, but what else shall I do man? */
41 #define pfring_ptr ax25_ptr
42 
43 /* Versioning */
44 #define RING_VERSION "7.1.0"
45 #define RING_VERSION_NUM 0x070100
46 
47 /* Set */
48 #define SO_ADD_TO_CLUSTER 99
49 #define SO_REMOVE_FROM_CLUSTER 100
50 #define SO_SET_STRING 101
51 #define SO_ADD_FILTERING_RULE 102
52 #define SO_REMOVE_FILTERING_RULE 103
53 #define SO_TOGGLE_FILTER_POLICY 104
54 #define SO_SET_SAMPLING_RATE 105
55 #define SO_ACTIVATE_RING 106
56 #define SO_RING_BUCKET_LEN 107
57 #define SO_SET_CHANNEL_ID 108
58 #define SO_PURGE_IDLE_HASH_RULES 109 /* inactivity (sec) */
59 #define SO_SET_APPL_NAME 110
60 #define SO_SET_PACKET_DIRECTION 111
61 #define SO_SET_MASTER_RING 112
62 #define SO_ADD_HW_FILTERING_RULE 113
63 #define SO_DEL_HW_FILTERING_RULE 114
64 #define SO_DEACTIVATE_RING 116
65 #define SO_SET_POLL_WATERMARK 117
66 #define SO_SET_VIRTUAL_FILTERING_DEVICE 118
67 #define SO_REHASH_RSS_PACKET 119
68 #define SO_SET_POLL_WATERMARK_TIMEOUT 121
69 #define SO_SHUTDOWN_RING 124
70 #define SO_PURGE_IDLE_RULES 125 /* inactivity (sec) */
71 #define SO_SET_SOCKET_MODE 126
72 #define SO_USE_SHORT_PKT_HEADER 127
73 #define SO_ENABLE_RX_PACKET_BOUNCE 131
74 #define SO_SET_APPL_STATS 133
75 #define SO_SET_STACK_INJECTION_MODE 134 /* stack injection/interception from userspace */
76 #define SO_CREATE_CLUSTER_REFEREE 135
77 #define SO_PUBLISH_CLUSTER_OBJECT 136
78 #define SO_LOCK_CLUSTER_OBJECT 137
79 #define SO_UNLOCK_CLUSTER_OBJECT 138
80 #define SO_SET_CUSTOM_BOUND_DEV_NAME 139
81 #define SO_SET_IFF_PROMISC 140
82 #define SO_SET_VLAN_ID 141
83 
84 /* Get */
85 #define SO_GET_RING_VERSION 170
86 #define SO_GET_FILTERING_RULE_STATS 171
87 #define SO_GET_HASH_FILTERING_RULE_STATS 172
88 #define SO_GET_ZC_DEVICE_INFO 173
89 #define SO_GET_NUM_RX_CHANNELS 174
90 #define SO_GET_RING_ID 175
91 #define SO_GET_BPF_EXTENSIONS 176
92 #define SO_GET_BOUND_DEVICE_ADDRESS 177
93 #define SO_GET_NUM_QUEUED_PKTS 178
94 #define SO_GET_PKT_HEADER_LEN 179
95 #define SO_GET_LOOPBACK_TEST 180
96 #define SO_GET_BUCKET_LEN 181
97 #define SO_GET_DEVICE_TYPE 182
98 #define SO_GET_EXTRA_DMA_MEMORY 183
99 #define SO_GET_BOUND_DEVICE_IFINDEX 184
100 #define SO_GET_DEVICE_IFINDEX 185
101 #define SO_GET_APPL_STATS_FILE_NAME 186
102 #define SO_GET_LINK_STATUS 187
103 
104 /* Other *sockopt */
105 #define SO_SELECT_ZC_DEVICE 190
106 
107 /* Error codes */
108 #define PF_RING_ERROR_GENERIC -1
109 #define PF_RING_ERROR_INVALID_ARGUMENT -2
110 #define PF_RING_ERROR_NO_PKT_AVAILABLE -3
111 #define PF_RING_ERROR_NO_TX_SLOT_AVAILABLE -4
112 #define PF_RING_ERROR_WRONG_CONFIGURATION -5
113 #define PF_RING_ERROR_END_OF_DEMO_MODE -6
114 #define PF_RING_ERROR_NOT_SUPPORTED -7
115 #define PF_RING_ERROR_INVALID_LIB_VERSION -8
116 #define PF_RING_ERROR_UNKNOWN_ADAPTER -9
117 #define PF_RING_ERROR_NOT_ENOUGH_MEMORY -10
118 #define PF_RING_ERROR_INVALID_STATUS -11
119 #define PF_RING_ERROR_RING_NOT_ENABLED -12
120 
121 #define REFLECTOR_NAME_LEN 8
122 
123 #ifndef IN6ADDR_ANY_INIT
124 #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } }
125 #endif
126 
127 #ifndef NETDEV_PRE_UP
128 #define NETDEV_PRE_UP 0x000D
129 #endif
130 
131 /* *********************************** */
132 
133 /*
134  Note that as offsets *can* be negative,
135  please do not change them to unsigned
136 */
137 struct pkt_offset {
138  /* This 'eth_offset' offset *must* be added to all offsets below
139  * ONLY if you are inside the kernel. Ignore it in user-space. */
140  int16_t eth_offset;
141 
142  int16_t vlan_offset;
143  int16_t l3_offset;
144  int16_t l4_offset;
145  int16_t payload_offset;
146 } __attribute__((packed));
147 
148 #ifndef ETH_ALEN
149 #define ETH_ALEN 6
150 #endif
151 
152 #define REFLECT_PACKET_DEVICE_NONE 0
153 
154 typedef union {
155  struct in6_addr v6; /* IPv6 src/dst IP addresses (Network byte order) */
156  u_int32_t v4; /* IPv4 src/dst IP addresses */
157 } ip_addr;
158 
159 #define ipv4_tos ip_tos
160 #define ipv6_tos ip_tos
161 #define ipv4_src ip_src.v4
162 #define ipv4_dst ip_dst.v4
163 #define ipv6_src ip_src.v6
164 #define ipv6_dst ip_dst.v6
165 #define host4_low host_low.v4
166 #define host4_high host_high.v4
167 #define host6_low host_low.v6
168 #define host6_high host_high.v6
169 #define host4_peer_a host_peer_a.v4
170 #define host4_peer_b host_peer_b.v4
171 #define host6_peer_a host_peer_a.v6
172 #define host6_peer_b host_peer_b.v6
173 
174 struct eth_vlan_hdr {
175  u_int16_t h_vlan_id; /* Tag Control Information (QoS, VLAN ID) */
176  u_int16_t h_proto; /* packet type ID field */
177 } __attribute__((packed));
178 
179 #define NEXTHDR_HOP 0
180 #define NEXTHDR_IPV6 41
181 #define NEXTHDR_ROUTING 43
182 #define NEXTHDR_FRAGMENT 44
183 #define NEXTHDR_ESP 50
184 #define NEXTHDR_AUTH 51
185 #define NEXTHDR_NONE 59
186 #define NEXTHDR_DEST 60
187 #define NEXTHDR_MOBILITY 135
188 
190  u_int32_t flow_lbl:24,
191  priority:4,
192  version:4;
193  u_int16_t payload_len;
194  u_int8_t nexthdr;
195  u_int8_t hop_limit;
196  struct in6_addr saddr;
197  struct in6_addr daddr;
198 } __attribute__((packed));
199 
201  u_int8_t nexthdr;
202  u_int8_t hdrlen;
203  u_int8_t padding[6];
204 } __attribute__((packed));
205 
206 #define GRE_HEADER_CHECKSUM 0x8000
207 #define GRE_HEADER_ROUTING 0x4000
208 #define GRE_HEADER_KEY 0x2000
209 #define GRE_HEADER_SEQ_NUM 0x1000
210 #define GRE_HEADER_VERSION 0x0007
211 
212 struct gre_header {
213  u_int16_t flags_and_version;
214  u_int16_t proto;
215  /* Optional fields */
216 } __attribute__((packed));
217 
218 #define GTP_SIGNALING_PORT 2123
219 #define GTP_U_DATA_PORT 2152
220 
221 #define GTP_VERSION_1 0x1
222 #define GTP_VERSION_2 0x2
223 #define GTP_PROTOCOL_TYPE 0x1
224 
225 struct gtp_v1_hdr {
226 #define GTP_FLAGS_VERSION 0xE0
227 #define GTP_FLAGS_VERSION_SHIFT 5
228 #define GTP_FLAGS_PROTOCOL_TYPE 0x10
229 #define GTP_FLAGS_RESERVED 0x08
230 #define GTP_FLAGS_EXTENSION 0x04
231 #define GTP_FLAGS_SEQ_NUM 0x02
232 #define GTP_FLAGS_NPDU_NUM 0x01
233  u_int8_t flags;
234  u_int8_t message_type;
235  u_int16_t payload_len;
236  u_int32_t teid;
237 } __attribute__((__packed__));
238 
239 /* Optional: GTP_FLAGS_EXTENSION | GTP_FLAGS_SEQ_NUM | GTP_FLAGS_NPDU_NUM */
240 struct gtp_v1_opt_hdr {
241  u_int16_t seq_num;
242  u_int8_t npdu_num;
243  u_int8_t next_ext_hdr;
244 } __attribute__((__packed__));
245 
246 /* Optional: GTP_FLAGS_EXTENSION && next_ext_hdr != 0 */
248 #define GTP_EXT_HDR_LEN_UNIT_BYTES 4
249  u_int8_t len; /* 4-byte unit */
250  /*
251  * u_char contents[len*4-2];
252  * u_int8_t next_ext_hdr;
253  */
254 } __attribute__((__packed__));
255 
256 #define NO_TUNNEL_ID 0xFFFFFFFF
257 
258 /* GPRS Tunneling Protocol */
259 typedef struct {
260  u_int32_t tunnel_id; /* GTP/GRE tunnelId or NO_TUNNEL_ID for no filtering */
261  u_int8_t tunneled_ip_version; /* Layer 4 protocol */
262  u_int8_t tunneled_proto; /* Layer 4 protocol */
263  ip_addr tunneled_ip_src, tunneled_ip_dst;
264  u_int16_t tunneled_l4_src_port, tunneled_l4_dst_port;
265 } __attribute__((packed))
266  tunnel_info;
267 
268 #define MOBILE_IP_PORT 434
269 
271  u_int8_t message_type, next_header;
272  u_int16_t reserved;
273 } __attribute__((packed));
274 
275 typedef enum {
276  long_pkt_header = 0, /* it includes PF_RING-extensions over the original pcap header */
277  short_pkt_header /* Short pcap-like header */
278 } pkt_header_len;
279 
281  /* Core fields (also used by NetFlow) */
282  u_int8_t dmac[ETH_ALEN], smac[ETH_ALEN]; /* MAC src/dst addresses */
283  u_int16_t eth_type; /* Ethernet type */
284  u_int16_t vlan_id; /* VLAN Id or NO_VLAN */
285  u_int16_t qinq_vlan_id; /* VLAN Id or NO_VLAN */
286  u_int8_t ip_version;
287  u_int8_t l3_proto, ip_tos; /* Layer 3 protocol, TOS */
288  ip_addr ip_src, ip_dst; /* IPv4/6 src/dst IP addresses */
289  u_int16_t l4_src_port, l4_dst_port;/* Layer 4 src/dst ports */
290  u_int8_t icmp_type, icmp_code; /* Variables for ICMP packets */
291  struct {
292  u_int8_t flags; /* TCP flags (0 if not available) */
293  u_int32_t seq_num, ack_num; /* TCP sequence number */
294  } tcp;
295  tunnel_info tunnel;
296  int32_t last_matched_rule_id; /* If > 0 identifies a rule that matched the packet */
297  struct pkt_offset offset; /* Offsets of L3/L4/payload elements */
298 } __attribute__((packed));
299 
300 #define UNKNOWN_INTERFACE -1
301 #define FAKE_PACKET -2 /* It indicates that the returned packet
302  is faked, and that the info is basically
303  a message from PF_RING
304  */
305 
307  u_int64_t timestamp_ns; /* Packet timestamp at ns precision. Note that if your NIC supports
308  hardware timestamp, this is the place to read timestamp from */
309 #define PKT_FLAGS_CHECKSUM_OFFLOAD 1 << 0 /* IP/TCP checksum offload enabled */
310 #define PKT_FLAGS_CHECKSUM_OK 1 << 1 /* Valid checksum (with IP/TCP checksum offload enabled) */
311 #define PKT_FLAGS_IP_MORE_FRAG 1 << 2 /* IP More fragments flag set */
312 #define PKT_FLAGS_IP_FRAG_OFFSET 1 << 3 /* IP fragment offset set (not 0) */
313 #define PKT_FLAGS_VLAN_HWACCEL 1 << 4 /* VLAN stripped by hw */
314 #define PKT_FLAGS_FLOW_OFFLOAD_UPDATE 1 << 6 /* Flow update metadata, see generic_flow_update struct (keep flag compatible with ZC) */
315 #define PKT_FLAGS_FLOW_OFFLOAD_PACKET 1 << 7 /* Flow raw packet, pkt_hash contains the flow_id (keep flag compatible with ZC) */
316 #define PKT_FLAGS_FLOW_OFFLOAD_MARKER 1 << 8 /* Flow raw packet belongs to a flow that has been marked (keep flag compatible with ZC) */
317  u_int32_t flags;
318 
319  u_int8_t rx_direction; /* 1=RX: packet received by the NIC, 0=TX: packet transmitted by the NIC */
320  int32_t if_index; /* index of the interface on which the packet has been received.
321  It can be also used to report other information */
322  u_int32_t pkt_hash; /* Hash based on the packet header */
323 
324  /* --- short header ends here --- */
325 
326  struct {
327  int32_t bounce_interface; /* Interface Id where this packet will bounce after processing
328  if its values is other than UNKNOWN_INTERFACE */
329  struct sk_buff *reserved; /* Kernel only pointer */
330  } tx;
331 
332  /* NOTE: leave it as last field of the memset on parse_pkt() will fail */
333  struct pkt_parsing_info parsed_pkt; /* packet parsing info */
334 } __attribute__((packed));
335 
336 /* NOTE: Keep 'struct pfring_pkthdr' in sync with 'struct pcap_pkthdr' */
337 
339  /* pcap header */
340  struct timeval ts; /* time stamp */
341  u_int32_t caplen; /* length of portion present */
342  u_int32_t len; /* length of whole packet (off wire) */
343  struct pfring_extended_pkthdr extended_hdr; /* PF_RING extended header */
344 } __attribute__((packed));
345 
346 /* *********************************** */
347 
348 #define MAX_NUM_LIST_ELEMENTS MAX_NUM_RING_SOCKETS /* sizeof(bits_set) [see below] */
349 
350 #ifdef __KERNEL__
351 typedef struct {
352  u_int32_t num_elements, top_element_id;
353  rwlock_t list_lock;
354  void *list_elements[MAX_NUM_LIST_ELEMENTS];
355 } lockless_list;
356 
357 void init_lockless_list(lockless_list *l);
358 int lockless_list_add(lockless_list *l, void *elem);
359 int lockless_list_remove(lockless_list *l, void *elem);
360 void* lockless_list_get_next(lockless_list *l, u_int32_t *last_list_idx);
361 void* lockless_list_get_first(lockless_list *l, u_int32_t *last_list_idx);
362 void lockless_list_empty(lockless_list *l, u_int8_t free_memory);
363 void term_lockless_list(lockless_list *l, u_int8_t free_memory);
364 #endif
365 
366 /* ************************************************* */
367 
368 typedef struct {
369  int32_t if_index; /* Index of the interface on which the packet has been received */
370  u_int8_t smac[ETH_ALEN], dmac[ETH_ALEN]; /* Use '0' (zero-ed MAC address) for any MAC address.
371  This is applied to both source and destination. */
372  u_int16_t vlan_id; /* Use 0 for any vlan */
373  u_int16_t eth_type; /* Use 0 for any ethernet type */
374  u_int8_t proto; /* Use 0 for any l3 protocol */
375  ip_addr shost, dhost; /* User '0' for any host. This is applied to both source and destination. */
376  ip_addr shost_mask, dhost_mask; /* IPv4/6 network mask */
377  u_int16_t sport_low, sport_high; /* All ports between port_low...port_high means 'any' port */
378  u_int16_t dport_low, dport_high; /* All ports between port_low...port_high means 'any' port */
379  struct {
380  u_int8_t flags; /* TCP flags (0 if not available) */
381  } tcp;
382 } __attribute__((packed))
383 filtering_rule_core_fields;
384 
385 /* ************************************************* */
386 
387 typedef struct {
388 
389 #define FILTER_TUNNEL_ID_FLAG 1 << 0
390  u_int16_t optional_fields; /* Use this mask to activate optional fields */
391 
392  struct {
393  u_int32_t tunnel_id; /* GTP/GRE tunnelId or NO_TUNNEL_ID for no filtering */
394  ip_addr shost, dhost; /* Filter on tunneled IPs */
395  ip_addr shost_mask, dhost_mask; /* IPv4/6 network mask */
396  } tunnel;
397 
398  char payload_pattern[32]; /* If strlen(payload_pattern) > 0, the packet payload
399  must match the specified pattern */
400 } __attribute__((packed))
401 filtering_rule_extended_fields;
402 
403 /* ************************************************* */
404 
405 typedef enum {
406  forward_packet_and_stop_rule_evaluation = 0,
407  dont_forward_packet_and_stop_rule_evaluation,
408  execute_action_and_continue_rule_evaluation,
409  execute_action_and_stop_rule_evaluation,
410  forward_packet_add_rule_and_stop_rule_evaluation, /* auto-filled hash rule */
411  reflect_packet_and_stop_rule_evaluation,
412  reflect_packet_and_continue_rule_evaluation,
413  bounce_packet_and_stop_rule_evaluation,
414  bounce_packet_and_continue_rule_evaluation
415 } rule_action_behaviour;
416 
417 typedef enum {
418  pkt_detail_flow,
419  pkt_detail_aggregation
420 } pkt_detail_mode;
421 
422 typedef enum {
423  rx_and_tx_direction = 0,
424  rx_only_direction,
425  tx_only_direction
426 } packet_direction;
427 
428 typedef enum {
429  send_and_recv_mode = 0,
430  send_only_mode,
431  recv_only_mode
432 } socket_mode;
433 
434 typedef struct {
435  unsigned long jiffies_last_match; /* Jiffies of the last rule match (updated by pf_ring) */
436  struct net_device *reflector_dev; /* Reflector device */
437 } __attribute__((packed))
438 filtering_internals;
439 
440 typedef struct {
441 #define FILTERING_RULE_AUTO_RULE_ID 0xFFFF
442  u_int16_t rule_id; /* Rules are processed in order from lowest to higest id */
443 
444  rule_action_behaviour rule_action; /* What to do in case of match */
445  u_int8_t balance_id, balance_pool; /* If balance_pool > 0, then pass the packet above only if the
446  (hash(proto, sip, sport, dip, dport) % balance_pool) = balance_id */
447  u_int8_t locked; /* Do not purge with pfring_purge_idle_rules() */
448  u_int8_t bidirectional; /* Swap peers when checking if they match the rule. Default: monodir */
449  filtering_rule_core_fields core_fields;
450  filtering_rule_extended_fields extended_fields;
451  char reflector_device_name[REFLECTOR_NAME_LEN];
452 
453  filtering_internals internals; /* PF_RING internal fields */
454 } __attribute__((packed))
455 filtering_rule;
456 
457 /* *********************************** */
458 
459 /* 82599 packet steering filters */
460 
461 typedef struct {
462  u_int8_t proto;
463  u_int32_t s_addr, d_addr;
464  u_int16_t s_port, d_port;
465  u_int16_t queue_id;
466 } __attribute__((packed))
467 intel_82599_five_tuple_filter_hw_rule;
468 
469 typedef struct {
470  u_int16_t vlan_id;
471  u_int8_t proto;
472  u_int32_t s_addr, d_addr;
473  u_int16_t s_port, d_port;
474  u_int16_t queue_id;
475 } __attribute__((packed))
476 intel_82599_perfect_filter_hw_rule;
477 
478 /*
479  Rules are defined per port. Each redirector device
480  has 4 ports (numbeder 0..3):
481 
482  0 +--------------+ 2 +--------------+
483  LAN <===> | | <===> | 1/10G |
484  | Redirector | | Ethernet |
485  LAN <===> | Switch | <===> | Adapter |
486  1 +--------------+ 3 +--------------+
487 
488  Drop Rule
489  Discard incoming packets matching the filter
490  on 'rule_port'
491 
492  Redirect Rule
493  Divert incoming packets matching the filter
494  on 'rule_port' to 'rule_target_port'.
495 
496  Mirror Rule
497  Copy incoming packets matching the filter
498  on 'rule_port' to 'rule_target_port'. The original
499  packet will continue its journey (i.e. packet are
500  actually duplicated)
501 */
502 
503 typedef enum {
504  drop_rule,
505  redirect_rule,
506  mirror_rule
507 } silicom_redirector_rule_type;
508 
509 typedef struct {
510  silicom_redirector_rule_type rule_type;
511  u_int8_t rule_port; /* Port on which the rule is defined */
512  u_int8_t rule_target_port; /* Target port (ignored for drop rules) */
513  u_int16_t vlan_id_low, vlan_id_high;
514  u_int8_t l3_proto;
515  ip_addr src_addr, dst_addr;
516  u_int32_t src_mask, dst_mask;
517  u_int16_t src_port_low, src_port_high;
518  u_int16_t dst_port_low, dst_port_high;
519 } __attribute__((packed))
520 silicom_redirector_hw_rule;
521 
522 typedef enum {
523  accolade_drop,
524  accolade_pass
525 } accolade_rule_action_type;
526 
527 /* Accolade supports mode 1 filtering on almost all cards (up to 32 rules),
528  * and mode 2 filtering on selected adapters (up to 1K rules).
529  * PF_RING automatically select mode 2 when available, and mode 1 as fallback.
530  * Mode 1 and 2 support different fields, please refer to the fields description. */
531 typedef struct {
532  accolade_rule_action_type action; /* in mode 2 this should be always the opposite of the default action */
533  u_int32_t port_mask; /* ports on which the rule is defined (default 0xf) - mode 1 only */
534  u_int8_t ip_version;
535  u_int8_t protocol; /* l4 */
536  u_int16_t vlan_id; /* mode 2 only (if vlan_id is set, mpls_label is ignored due to hw limitations) */
537  u_int32_t mpls_label; /* mode 2 only */
538  ip_addr src_addr, dst_addr;
539  u_int32_t src_addr_bits, dst_addr_bits;
540  u_int16_t src_port_low;
541  u_int16_t src_port_high; /* mode 1 only */
542  u_int16_t dst_port_low;
543  u_int16_t dst_port_high; /* mode 1 only */
544  u_int8_t l4_port_not; /* rule match if src_port_low/dst_port_low are defined and they do not match - mode 2 only */
545 } __attribute__((packed))
546 accolade_hw_rule;
547 
548 typedef enum {
549  flow_drop_rule,
550  flow_mark_rule
551 } generic_flow_rule_action_type;
552 
553 typedef struct {
554  generic_flow_rule_action_type action;
555  u_int32_t flow_id; /* flow id from flow metadata */
556  u_int32_t thread; /* id of the thread setting the rule */
557 } __attribute__((packed))
558 generic_flow_id_hw_rule;
559 
560 typedef struct {
561  generic_flow_rule_action_type action;
562  ip_addr src_ip;
563  ip_addr dst_ip;
564  u_int16_t src_port;
565  u_int16_t dst_port;
566  u_int8_t ip_version;
567  u_int8_t protocol;
568  u_int8_t interface; /* from extended_hdr.if_index */
569 } __attribute__((packed))
570 generic_flow_tuple_hw_rule;
571 
572 typedef enum {
573  intel_82599_five_tuple_rule,
574  intel_82599_perfect_filter_rule,
575  silicom_redirector_rule,
576  generic_flow_id_rule,
577  generic_flow_tuple_rule,
578  accolade_rule,
579  accolade_default
580 } hw_filtering_rule_type;
581 
582 typedef struct {
583  hw_filtering_rule_type rule_family_type;
584  u_int16_t rule_id;
585 
586  union {
587  intel_82599_five_tuple_filter_hw_rule five_tuple_rule;
588  intel_82599_perfect_filter_hw_rule perfect_rule;
589  silicom_redirector_hw_rule redirector_rule;
590  generic_flow_id_hw_rule flow_id_rule;
591  generic_flow_tuple_hw_rule flow_tuple_rule;
592  accolade_hw_rule accolade_rule;
593  } rule_family;
594 } __attribute__((packed))
595 hw_filtering_rule;
596 
597 #define MAGIC_HW_FILTERING_RULE_REQUEST 0x29010020 /* deprecated? */
598 
599 #ifdef __KERNEL__
600 
601 #define ETHTOOL_PFRING_SRXFTCHECK 0x10000000
602 #define ETHTOOL_PFRING_SRXFTRLDEL 0x10000031
603 #define ETHTOOL_PFRING_SRXFTRLINS 0x10000032
604 
605 #if defined(I82599_HW_FILTERING_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40))
606 #define FLOW_EXT 0x80000000
607 union _kcompat_ethtool_flow_union {
608  struct ethtool_tcpip4_spec tcp_ip4_spec;
609  struct ethtool_usrip4_spec usr_ip4_spec;
610  __u8 hdata[60];
611 };
612 struct _kcompat_ethtool_flow_ext {
613  __be16 vlan_etype;
614  __be16 vlan_tci;
615  __be32 data[2];
616 };
617 struct _kcompat_ethtool_rx_flow_spec {
618  __u32 flow_type;
619  union _kcompat_ethtool_flow_union h_u;
620  struct _kcompat_ethtool_flow_ext h_ext;
621  union _kcompat_ethtool_flow_union m_u;
622  struct _kcompat_ethtool_flow_ext m_ext;
623  __u64 ring_cookie;
624  __u32 location;
625 };
626 #define ethtool_rx_flow_spec _kcompat_ethtool_rx_flow_spec
627 #endif /* defined(I82599_HW_FILTERING_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40)) */
628 
629 #endif /* __KERNEL__ */
630 
631 typedef enum {
632  add_hw_rule,
633  remove_hw_rule
634 } hw_filtering_rule_command;
635 
636 /* *********************************** */
637 
639  u_int32_t tv_sec;
640  u_int32_t tv_nsec;
641 } __attribute__((packed));
642 
643 typedef struct {
644  u_int32_t flow_id;
645 
646  u_int8_t ip_version;
647  u_int8_t l4_protocol;
648 
649  u_int8_t tos;
650  u_int8_t tcp_flags;
651 
652  ip_addr src_ip;
653  ip_addr dst_ip;
654 
655  u_int16_t src_port;
656  u_int16_t dst_port;
657 
658  u_int32_t fwd_packets;
659  u_int32_t fwd_bytes;
660  u_int32_t rev_packets;
661  u_int32_t rev_bytes;
662 
663  struct pfring_timespec fwd_ts_first;
664  struct pfring_timespec fwd_ts_last;
665  struct pfring_timespec rev_ts_first;
666  struct pfring_timespec rev_ts_last;
667 } __attribute__((packed))
668 generic_flow_update;
669 
670 typedef struct {
671  generic_flow_rule_action_type action;
672  u_int32_t flow_id;
673 } __attribute__((packed))
674 generic_flow_feedback;
675 
676 /* *********************************** */
677 
678 extern struct pf_ring_socket *pfr; /* Forward */
679 
680 /* *********************************** */
681 
682 typedef int (*five_tuple_rule_handler)(struct pf_ring_socket *pfr,
683  hw_filtering_rule *rule,
684  hw_filtering_rule_command request);
685 typedef int (*perfect_filter_hw_rule_handler)(struct pf_ring_socket *pfr,
686  hw_filtering_rule *rule,
687  hw_filtering_rule_command request);
688 
689 typedef struct {
690  five_tuple_rule_handler five_tuple_handler;
691  perfect_filter_hw_rule_handler perfect_filter_handler;
692 } __attribute__((packed))
693 hw_filtering_device_handler;
694 
695 /* *********************************** */
696 
697 /* Hash size used for precise packet matching */
698 #define DEFAULT_RING_HASH_SIZE 4096
699 
700 /*
701  * The hashtable contains only perfect matches: no
702  * wildacards or so are accepted. (bidirectional)
703  */
704 typedef struct {
705  u_int16_t rule_id; /* Future use */
706  u_int16_t vlan_id;
707  u_int8_t ip_version;
708  u_int8_t proto; /* Layer 3 protocol */
709  ip_addr host_peer_a, host_peer_b;
710  u_int16_t port_peer_a, port_peer_b;
711 
712  rule_action_behaviour rule_action; /* What to do in case of match */
713  char reflector_device_name[REFLECTOR_NAME_LEN];
714 
715  filtering_internals internals; /* PF_RING internal fields */
716 } __attribute__((packed))
717 hash_filtering_rule;
718 
719 typedef struct {
720  u_int64_t match;
721  u_int64_t miss;
722  u_int32_t inactivity; /* sec */
723 } __attribute__((packed))
724 hash_filtering_rule_stats;
725 
726 /* ************************************************* */
727 
729  hash_filtering_rule rule;
730  u_int64_t match; /* number of packets matching the rule */
731  struct _sw_filtering_hash_bucket *next;
732 } __attribute__((packed))
733 sw_filtering_hash_bucket;
734 
735 /* *********************************** */
736 
737 #define RING_MIN_SLOT_SIZE (60+sizeof(struct pfring_pkthdr))
738 #define RING_MAX_SLOT_SIZE (1514+sizeof(struct pfring_pkthdr))
739 
740 #if !defined(__cplusplus)
741 
742 #define min_val(a,b) ((a < b) ? a : b)
743 #define max_val(a,b) ((a > b) ? a : b)
744 
745 #endif
746 
747 /* *********************************** */
748 
749 /* False sharing reference: http://en.wikipedia.org/wiki/False_sharing */
750 
751 typedef struct flowSlotInfo {
752  /* first page, managed by kernel */
753  u_int16_t version, sample_rate;
754  u_int32_t min_num_slots, slot_len, data_len;
755  u_int64_t tot_mem;
756  volatile u_int64_t insert_off;
757  u_int64_t kernel_remove_off;
758  u_int64_t tot_pkts, tot_lost;
759  volatile u_int64_t tot_insert;
760  u_int64_t kernel_tot_read;
761  u_int64_t tot_fwd_ok, tot_fwd_notok;
762  u_int64_t good_pkt_sent, pkt_send_error;
763  /* <-- 64 bytes here, should be enough to avoid some L1 VIVT coherence issues (32 ~ 64bytes lines) */
764  char padding[128-104];
765  /* <-- 128 bytes here, should be enough to avoid false sharing in most L2 (64 ~ 128bytes lines) */
766  char k_padding[4096-128];
767  /* <-- 4096 bytes here, to get a page aligned block writable by kernel side only */
768 
769  /* second page, managed by userland */
770  volatile u_int64_t tot_read;
771  volatile u_int64_t remove_off /* managed by userland */;
772  char u_padding[4096-16];
773  /* <-- 8192 bytes here, to get a page aligned block writable by userland only */
774 } __attribute__((packed))
775 FlowSlotInfo;
776 
777 /* **************************************** */
778 
779 #ifdef __KERNEL__
780 FlowSlotInfo *getRingPtr(void);
781 int allocateRing(char *deviceName, u_int numSlots, u_int bucketLen, u_int sampleRate);
782 unsigned int pollRing(struct file *fp, struct poll_table_struct * wait);
783 void deallocateRing(void);
784 #endif /* __KERNEL__ */
785 
786 /* *********************************** */
787 
788 #define PF_RING 27 /* (0x1b) Packet Ring */
789 #define SOCK_RING PF_RING
790 
791 /* ioctl() */
792 #define SIORINGPOLL 0x8888
793 
794 /* ************************************************* */
795 
796 #ifdef __KERNEL__
797 struct ring_sock {
798  struct sock sk; /* It MUST be the first element */
799  struct pf_ring_socket *pf_ring_sk;
800  /* FIXX Do we really need the following items? */
801  //struct packet_type prot_hook;
802  //spinlock_t bind_lock;
803 };
804 #endif
805 
806 /* *********************************** */
807 
808 typedef int (*zc_dev_wait_packet)(void *adapter, int mode);
809 typedef void (*zc_dev_notify)(void *rx_adapter_ptr, void *tx_adapter_ptr, u_int8_t device_in_use);
810 
811 typedef enum {
812  add_device_mapping = 0, remove_device_mapping
813 } zc_dev_operation;
814 
815 /* IMPORTANT NOTE
816  * add new family types ALWAYS at the end
817  * (i.e. append) of this datatype */
818 typedef enum {
819  intel_e1000e = 0,
820  intel_igb,
821  intel_ixgbe,
822  intel_ixgbe_82598,
823  intel_ixgbe_82599,
824  intel_igb_82580,
825  intel_e1000,
826  intel_ixgbe_82599_ts,
827  intel_i40e,
828  intel_fm10k
829 } zc_dev_model;
830 
831 typedef struct {
832  u_int32_t packet_memory_num_slots;
833  u_int32_t packet_memory_slot_len;
834  u_int32_t descr_packet_memory_tot_len;
835  u_int16_t registers_index;
836  u_int16_t stats_index;
837  u_int32_t vector;
838  u_int32_t num_queues;
839 } __attribute__((packed))
840 mem_ring_info;
841 
842 typedef struct {
843  mem_ring_info rx;
844  mem_ring_info tx;
845  u_int32_t phys_card_memory_len;
846  zc_dev_model device_model;
847 } __attribute__((packed))
848 zc_memory_info;
849 
850 typedef struct {
851  zc_memory_info mem_info;
852  u_int16_t channel_id;
853  void *rx_descr_packet_memory; /* Invalid in userland */
854  void *tx_descr_packet_memory; /* Invalid in userland */
855  char *phys_card_memory; /* Invalid in userland */
856  struct net_device *dev; /* Invalid in userland */
857  struct device *hwdev; /* Invalid in userland */
858  u_char device_address[6];
859 #ifdef __KERNEL__
860  wait_queue_head_t *packet_waitqueue;
861 #else
862  void *packet_waitqueue;
863 #endif
864  u_int8_t *interrupt_received, in_use;
865  void *rx_adapter_ptr, *tx_adapter_ptr;
866  zc_dev_wait_packet wait_packet_function_ptr;
867  zc_dev_notify usage_notification;
868 } __attribute__((packed))
869 zc_dev_info;
870 
871 #ifndef IFNAMSIZ
872 #define IFNAMSIZ 16
873 #endif
874 
875 typedef struct {
876  zc_dev_operation operation;
877  char device_name[IFNAMSIZ];
878  int32_t channel_id;
879 } __attribute__((packed))
880 zc_dev_mapping;
881 
882 /* ************************************************* */
883 
884 #define RING_ANY_CHANNEL ((u_int64_t)-1)
885 #define MAX_NUM_RX_CHANNELS 64 /* channel_id_mask is a 64 bit mask */
886 #define UNKNOWN_NUM_RX_CHANNELS 1
887 
888 #define RING_ANY_VLAN ((u_int16_t)0xFFFF)
889 #define RING_NO_VLAN ((u_int16_t)0)
890 
891 /* ************************************************* */
892 
893 typedef enum {
894  cluster_per_flow = 0, /* 6-tuple: <src ip, src port, dst ip, dst port, proto, vlan> */
895  cluster_round_robin,
896  cluster_per_flow_2_tuple, /* 2-tuple: <src ip, dst ip > */
897  cluster_per_flow_4_tuple, /* 4-tuple: <src ip, src port, dst ip, dst port > */
898  cluster_per_flow_5_tuple, /* 5-tuple: <src ip, src port, dst ip, dst port, proto > */
899  cluster_per_flow_tcp_5_tuple, /* 5-tuple only with TCP, 2 tuple with all other protos */
900  /* same as above, computing on tunnel content when present */
901  cluster_per_inner_flow, /* 6-tuple: <src ip, src port, dst ip, dst port, proto, vlan> */
902  cluster_per_inner_flow_2_tuple, /* 2-tuple: <src ip, dst ip > */
903  cluster_per_inner_flow_4_tuple, /* 4-tuple: <src ip, src port, dst ip, dst port > */
904  cluster_per_inner_flow_5_tuple, /* 5-tuple: <src ip, src port, dst ip, dst port, proto > */
905  cluster_per_inner_flow_tcp_5_tuple,/* 5-tuple only with TCP, 2 tuple with all other protos */
906  /* new types, for L2-only protocols */
907  cluster_per_flow_ip_5_tuple, /* 5-tuple only with IP, 2 tuple with non-IP <src mac, dst mac>*/
908  cluster_per_inner_flow_ip_5_tuple, /* 5-tuple only with IP, 2 tuple with non-IP <src mac, dst mac>*/
909 } cluster_type;
910 
912  u_int clusterId;
913  cluster_type the_type;
914 } __attribute__((packed));
915 
916 typedef enum {
917  standard_nic_family = 0, /* No Hw Filtering */
918  intel_82599_family
919 } pfring_device_type;
920 
921 typedef struct {
922  char device_name[IFNAMSIZ];
923  pfring_device_type device_type;
924 
925  /* Entry in the /proc filesystem */
926  struct proc_dir_entry *proc_entry;
927 } __attribute__((packed))
928 virtual_filtering_device_info;
929 
930 /* ************************************************* */
931 
933  u_int32_t cluster_id;
934  u_int32_t recovered; /* fresh or recovered */
935 } __attribute__((packed));
936 
938  u_int32_t cluster_id;
939  u_int32_t object_type;
940  u_int32_t object_id;
941 } __attribute__((packed));
942 
944  u_int32_t cluster_id;
945  u_int32_t object_type;
946  u_int32_t object_id;
947  u_int32_t lock_mask;
948  u_int32_t reserved;
949 } __attribute__((packed));
950 
951 /* ************************************************* */
952 
953 typedef enum {
954  cluster_slave = 0,
955  cluster_master = 1
956 } cluster_client_type;
957 
958 /* ************************************************* */
959 
960 #ifdef __KERNEL__
961 
962 #if(LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0))
963 #ifndef netdev_notifier_info_to_dev
964 #define netdev_notifier_info_to_dev(a) ((struct net_device*)a)
965 #endif
966 #endif
967 
968 #define CLUSTER_LEN 64
969 
970 /*
971  * A ring cluster is used group together rings used by various applications
972  * so that they look, from the PF_RING point of view, as a single ring.
973  * This means that developers can use clusters for sharing packets across
974  * applications using various policies as specified in the hashing_mode
975  * parameter.
976  */
977 struct ring_cluster {
978  u_short cluster_id; /* 0 = no cluster */
979  u_short num_cluster_elements;
980  cluster_type hashing_mode;
981  u_short hashing_id;
982  struct sock *sk[CLUSTER_LEN];
983 };
984 
985 /*
986  * Linked-list of ring clusters
987  */
988 typedef struct {
989  struct ring_cluster cluster;
990  struct list_head list;
991 } ring_cluster_element;
992 
993 #define MAX_NUM_ZC_BOUND_SOCKETS MAX_NUM_RING_SOCKETS
994 
995 typedef struct {
996  u8 num_bound_sockets;
997  zc_dev_info zc_dev;
998  struct list_head list;
999  /*
1000  In the ZC world only one application can open and enable the
1001  device@channel per direction. The array below is used to keep
1002  pointers to the sockets bound to device@channel.
1003  No more than one socket can be enabled for RX and one for TX.
1004  */
1005  struct pf_ring_socket *bound_sockets[MAX_NUM_ZC_BOUND_SOCKETS];
1006  rwlock_t lock;
1007 } zc_dev_list;
1008 
1009 #define MAX_NUM_IFIDX 2048
1010 
1011 /*
1012  * Linked-list of virtual filtering devices
1013  */
1014 typedef struct {
1015  virtual_filtering_device_info info;
1016  struct list_head list;
1017 } virtual_filtering_device_element;
1018 
1019 typedef struct {
1020  struct net_device *dev;
1021 
1022  /* Note: we keep device_name here for a couple of reasons:
1023  * 1. some device types might NOT have a net_device handler
1024  * 2. when a device name changes we need to remember the old name */
1025  char device_name[IFNAMSIZ];
1026 
1027  pfring_device_type device_type; /* Device Type */
1028 
1029  u_int8_t do_not_remove_promisc; /* promisc was set before any socket */
1030  atomic_t promisc_users; /* number of rings with promisc set bound to this device */
1031 
1032  /* Entry in the /proc filesystem */
1033  struct proc_dir_entry *proc_entry;
1034 
1035  /* ZC */
1036  u_int8_t is_zc_device;
1037  zc_dev_model zc_dev_model;
1038  u_int num_zc_dev_rx_queues; /* 0 for non ZC devices */
1039  u_int32_t num_zc_rx_slots;
1040  u_int32_t num_zc_tx_slots;
1041 
1042  /* Hardware Filters */
1043  struct {
1044  u_int16_t num_filters;
1045  hw_filtering_device_handler filter_handlers;
1046  } hw_filters;
1047 
1048  struct list_head device_list;
1049 } pf_ring_device;
1050 
1051 /* ************************************************* */
1052 
1053 struct dma_memory_info {
1054  u_int32_t num_chunks, chunk_len;
1055  u_int32_t num_slots, slot_len;
1056  unsigned long *virtual_addr; /* chunks pointers */
1057  u_int64_t *dma_addr; /* per-slot DMA adresses */
1058  struct device *hwdev; /* dev for DMA mapping */
1059 };
1060 
1061 /* ************************************************* */
1062 
1063 typedef struct {
1064  u_int32_t object_type;
1065  u_int32_t object_id;
1066  u_int32_t lock_bitmap;
1067 
1068  struct list_head list;
1069 } cluster_object;
1070 
1071 struct cluster_referee {
1072  u_int32_t id;
1073  u_int32_t users;
1074  u_int8_t master_running;
1075  struct list_head objects_list;
1076 
1077  struct list_head list;
1078 };
1079 
1080 /* ************************************************* */
1081 
1082 typedef int (*do_handle_sw_filtering_hash_bucket)(struct pf_ring_socket *pfr,
1083  sw_filtering_hash_bucket* rule,
1084  u_char add_rule);
1085 
1086 typedef int (*do_add_packet_to_ring)(struct pf_ring_socket *pfr,
1087  u_int8_t real_skb,
1088  struct pfring_pkthdr *hdr, struct sk_buff *skb,
1089  int displ, u_int8_t parse_pkt_first);
1090 
1091 typedef int (*do_add_raw_packet_to_ring)(struct pf_ring_socket *pfr,
1092  struct pfring_pkthdr *hdr,
1093  u_char *data, u_int data_len,
1094  u_int8_t parse_pkt_first);
1095 
1096 typedef u_int32_t (*do_rehash_rss)(struct sk_buff *skb, struct pfring_pkthdr *hdr);
1097 
1098 /* ************************************************* */
1099 
1100 #define NUM_FRAGMENTS_HASH_SLOTS 4096
1101 #define MAX_CLUSTER_FRAGMENTS_LEN 8*NUM_FRAGMENTS_HASH_SLOTS
1102 
1103 struct hash_fragment_node {
1104  /* Key */
1105  u_int32_t ipv4_src_host, ipv4_dst_host;
1106  u_int16_t ip_fragment_id;
1107 
1108  /* Value */
1109  u_int8_t cluster_app_id; /* Identifier of the app where the main fragment has been placed */
1110 
1111  /* Expire */
1112  unsigned long expire_jiffies; /* Time at which this entry will be expired */
1113 
1114  /* collision list */
1115  struct list_head frag_list;
1116 };
1117 
1118 /* ************************************************* */
1119 
1120 #define MAX_NUM_DEVICES_ID MAX_NUM_IFIDX
1121 /*
1122  * Ring options
1123  */
1124 struct pf_ring_socket {
1125  u_int8_t ring_active, ring_shutdown, num_rx_channels, num_bound_devices;
1126  pf_ring_device *ring_dev;
1127 
1128  DECLARE_BITMAP(netdev_mask, MAX_NUM_DEVICES_ID /* bits */);
1129  int ring_pid;
1130  u_int32_t ring_id;
1131  char *appl_name; /* String that identifies the application bound to the socket */
1132  packet_direction direction; /* Specify the capture direction for packets */
1133  socket_mode mode; /* Specify the link direction to enable (RX, TX, both) */
1134  pkt_header_len header_len;
1135  u_int8_t stack_injection_mode;
1136  u_int8_t promisc_enabled;
1137 
1138  struct sock *sk;
1139 
1140  /* /proc */
1141  char sock_proc_name[64]; /* /proc/net/pf_ring/<sock_proc_name> */
1142  char sock_proc_stats_name[64]; /* /proc/net/pf_ring/stats/<sock_proc_stats_name> */
1143  char statsString[512 + 1];
1144  char custom_bound_device_name[32];
1145 
1146  /* Poll Watermark */
1147  u_int32_t num_poll_calls;
1148  u_int16_t poll_num_pkts_watermark;
1149  u_int16_t poll_watermark_timeout;
1150  u_long queue_nonempty_timestamp;
1151 
1152  /* Master Ring */
1153  struct pf_ring_socket *master_ring;
1154 
1155  /* Used to transmit packets after they have been received
1156  from user space */
1157  struct {
1158  u_int8_t enable_tx_with_bounce;
1159  rwlock_t consume_tx_packets_lock;
1160  int32_t last_tx_dev_idx;
1161  struct net_device *last_tx_dev;
1162  } tx;
1163 
1164  /* ZC (Direct NIC Access) */
1165  zc_dev_mapping zc_mapping;
1166  zc_dev_info *zc_dev;
1167  zc_dev_list *zc_device_entry;
1168 
1169  /* Extra DMA memory */
1170  struct dma_memory_info *extra_dma_memory;
1171 
1172  /* Cluster */
1173  u_int16_t cluster_id /* 0 = no cluster */;
1174 
1175  /* Channel */
1176  int64_t channel_id_mask; /* -1 = any channel */
1177  u_int16_t num_channels_per_ring;
1178 
1179  /* rehash rss function pointer */
1180  do_rehash_rss rehash_rss;
1181 
1182  /* Ring Slots */
1183  u_char *ring_memory;
1184  u_int16_t slot_header_len;
1185  u_int32_t bucket_len, slot_tot_mem;
1186  FlowSlotInfo *slots_info; /* Points to ring_memory */
1187  u_char *ring_slots; /* Points to ring_memory+sizeof(FlowSlotInfo) */
1188 
1189  /* Packet Sampling */
1190  u_int32_t pktToSample, sample_rate;
1191 
1192  /* Virtual Filtering Device */
1193  virtual_filtering_device_element *v_filtering_dev;
1194 
1195  /* VLAN ID */
1196  u_int16_t vlan_id; /* 0 = all VLANs are accepted */
1197 
1198  int32_t bpfFilter; /* bool */
1199 
1200  /* Sw Filtering Rules - default policy */
1201  u_int8_t sw_filtering_rules_default_accept_policy; /* 1=default policy is accept, drop otherwise */
1202 
1203  /* Sw Filtering Rules - hash */
1204  sw_filtering_hash_bucket **sw_filtering_hash;
1205  u_int64_t sw_filtering_hash_match;
1206  u_int64_t sw_filtering_hash_miss;
1207  u_int32_t num_sw_filtering_hash;
1208 
1209  /* Sw Filtering Rules - wildcard */
1210  u_int32_t num_sw_filtering_rules;
1211  struct list_head sw_filtering_rules;
1212 
1213  /* Hw Filtering Rules */
1214  u_int16_t num_hw_filtering_rules;
1215  struct list_head hw_filtering_rules;
1216 
1217  /* Locks */
1218  atomic_t num_ring_users;
1219  wait_queue_head_t ring_slots_waitqueue;
1220  rwlock_t ring_index_lock, ring_rules_lock;
1221 
1222  /* Indexes (Internal) */
1223  u_int32_t insert_page_id, insert_slot_id;
1224 
1225  /* Function pointer */
1226  do_add_packet_to_ring add_packet_to_ring;
1227  do_add_raw_packet_to_ring add_raw_packet_to_ring;
1228 
1229  /* Kernel consumer */
1230  char *kernel_consumer_options, *kernel_consumer_private;
1231 
1232  /* Userspace cluster (ZC) */
1233  struct cluster_referee *cluster_referee;
1234  cluster_client_type cluster_role;
1235 };
1236 
1237 /* **************************************** */
1238 
1239 typedef struct {
1240  struct net *net;
1241 
1242  /* /proc entry for ring module */
1243  struct proc_dir_entry *proc;
1244  struct proc_dir_entry *proc_dir;
1245  struct proc_dir_entry *proc_dev_dir;
1246  struct proc_dir_entry *proc_stats_dir;
1247 
1248  struct list_head list;
1249 } pf_ring_net;
1250 
1251 /* **************************************** */
1252 
1253 #define MAX_NUM_PATTERN 32
1254 
1255 typedef struct {
1256  filtering_rule rule;
1257 
1258 #ifdef CONFIG_TEXTSEARCH
1259  struct ts_config *pattern[MAX_NUM_PATTERN];
1260 #endif
1261  struct list_head list;
1262 } sw_filtering_rule_element;
1263 
1264 typedef struct {
1265  hw_filtering_rule rule;
1266  struct list_head list;
1267 } hw_filtering_rule_element;
1268 
1269 /* **************************************** */
1270 
1271 typedef void (*handle_pfring_zc_dev)(zc_dev_operation operation,
1272  mem_ring_info *rx_info,
1273  mem_ring_info *tx_info,
1274  void *rx_descr_packet_memory,
1275  void *tx_descr_packet_memory,
1276  void *phys_card_memory,
1277  u_int phys_card_memory_len,
1278  u_int channel_id,
1279  struct net_device *dev,
1280  struct device *hwdev,
1281  zc_dev_model device_model,
1282  u_char *device_address,
1283  wait_queue_head_t *packet_waitqueue,
1284  u_int8_t *interrupt_received,
1285  void *rx_adapter_ptr, void *tx_adapter_ptr,
1286  zc_dev_wait_packet wait_packet_function_ptr,
1287  zc_dev_notify dev_notify_function_ptr);
1288 
1289 extern handle_pfring_zc_dev get_ring_zc_dev_handler(void);
1290 extern void set_ring_zc_dev_handler(handle_pfring_zc_dev the_zc_device_handler);
1291 extern void do_ring_zc_dev_handler(zc_dev_operation operation,
1292  mem_ring_info *rx_info,
1293  mem_ring_info *tx_info,
1294  unsigned long *rx_packet_memory,
1295  void *rx_descr_packet_memory,
1296  unsigned long *tx_packet_memory,
1297  void *tx_descr_packet_memory,
1298  void *phys_card_memory,
1299  u_int phys_card_memory_len,
1300  u_int channel_id,
1301  struct net_device *dev,
1302  struct device *hwdev,
1303  zc_dev_model device_model,
1304  u_char *device_address,
1305  wait_queue_head_t * packet_waitqueue,
1306  u_int8_t * interrupt_received,
1307  void *rx_adapter_ptr, void *tx_adapter_ptr,
1308  zc_dev_wait_packet wait_packet_function_ptr,
1309  zc_dev_notify dev_notify_function_ptr);
1310 
1311 typedef int (*handle_ring_skb)(struct sk_buff *skb, u_char recv_packet,
1312  u_char real_skb,
1313  int32_t channel_id,
1314  u_int32_t num_rx_channels);
1315 typedef int (*handle_ring_buffer)(struct net_device *dev,
1316  char *data, int len);
1317 
1318 /* Hack to jump from a device directly to PF_RING */
1319 struct pfring_hooks {
1320  u_int32_t magic; /*
1321  It should be set to PF_RING
1322  and is MUST be the first one on this struct
1323  */
1324  handle_ring_skb ring_handler;
1325  handle_ring_buffer buffer_ring_handler;
1326  handle_pfring_zc_dev zc_dev_handler;
1327 };
1328 
1329 /* *************************************************************** */
1330 
1331 #ifdef REDBORDER_PATCH
1332 typedef enum {
1333  IF_SCAN,
1334  GET_DEV_NUM,
1335  IS_BYPASS,
1336  GET_BYPASS_SLAVE,
1337  GET_BYPASS_CAPS,
1338  GET_WD_SET_CAPS,
1339  SET_BYPASS,
1340  GET_BYPASS
1341  /* ... */
1342 } BPCTL_COMPACT_CMND_TYPE_SD;
1343 
1344 struct bpctl_cmd {
1345  int status;
1346  int data[8];
1347  int in_param[8];
1348  int out_param[8];
1349 };
1350 
1351 #define BPCTL_MAGIC_NUM 'J'
1352 #define BPCTL_IOCTL_TX_MSG(cmd) _IOWR(BPCTL_MAGIC_NUM, cmd, struct bpctl_cmd)
1353 
1354 extern int bpctl_kernel_ioctl(unsigned int ioctl_num, void *ioctl_param);
1355 #endif
1356 
1357 /* *************************************************************** */
1358 
1359 extern void pf_ring_add_module_dependency(void);
1360 extern int pf_ring_inject_packet_to_ring(int if_index, int channel_id, u_char *data, int data_len, struct pfring_pkthdr *hdr);
1361 
1362 /* *********************************** */
1363 
1364 /* pcap header */
1365 struct pcaplike_file_header {
1366  int32_t magic;
1367  u_int16_t version_major, version_minor;
1368  int32_t thiszone; /* gmt to local correction */
1369  u_int32_t sigfigs; /* accuracy of timestamps */
1370  u_int32_t snaplen; /* max length saved portion of each pkt */
1371  u_int32_t linktype; /* data link type (LINKTYPE_*) */
1372 };
1373 
1374 struct pcaplike_timeval {
1375  u_int32_t tv_sec, tv_usec;
1376 };
1377 
1378 struct pcaplike_pkthdr {
1379  struct pcaplike_timeval ts; /* time stamp */
1380  u_int32_t caplen; /* length of portion present */
1381  u_int32_t len; /* length this packet (off wire) */
1382 };
1383 
1384 #endif /* __KERNEL__ */
1385 
1386 /* *********************************** */
1387 
1388 #endif /* __RING_H */
Definition: pf_ring.h:937
Definition: pf_ring.h:338
Definition: pf_ring.h:280
Definition: pf_ring.h:911
Definition: pf_ring.h:212
Definition: pf_ring.h:259
Definition: pf_ring.h:932
Definition: pf_ring.h:751
Definition: pf_ring.h:225
Definition: pf_ring.h:728
Definition: pf_ring.h:240
Definition: pf_ring.h:638
Definition: pf_ring.h:189
Definition: pf_ring.h:270
Definition: pf_ring.h:137
Definition: pf_ring.h:943
Definition: pf_ring.h:154
Definition: pf_ring.h:200
Definition: pf_ring.h:174
Definition: pf_ring.h:306
Definition: pf_ring.h:247