/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#ifndef __HNAE_H
#define __HNAE_H

/* Names used in this framework:
 * ae handle (handle):
 *   a set of queues provided by AE
 * ring buffer queue (rbq):
 *   the channel between upper layer and the AE, can do tx and rx
 * ring:
 *   a tx or rx channel within a rbq
 * ring description (desc):
 *   an element in the ring with packet information
 * buffer:
 *   a memory region referred by desc with the full packet payload
 *
 * "num" means a static number set as a parameter, "count" means a dynamic
 * number set while running
 * "cb" means control block
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/phy.h>
#include <linux/types.h>

#define HNAE_DRIVER_VERSION "2.0"
#define HNAE_DRIVER_NAME "hns"
#define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
#define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver"
#define HNAE_DEFAULT_DEVICE_DESCR "Hisilicon Network Subsystem"

#ifdef DEBUG

#ifndef assert
#define assert(expr) \
do { \
	if (!(expr)) { \
		pr_err("Assertion failed! %s, %s, %s, line %d\n", \
		       #expr, __FILE__, __func__, __LINE__); \
	} \
} while (0)
#endif

#else

#ifndef assert
#define assert(expr)
#endif

#endif

#define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0')
#define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0')
#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1)
#define AE_NAME_SIZE 16

#define BD_SIZE_2048_MAX_MTU 6000

/* The RX and TX RCB register formats may diverge in the future, but they are
 * identical for now.
 */
#define RCB_REG_BASEADDR_L 0x00 /* P660 supports only 32-bit accesses */
#define RCB_REG_BASEADDR_H 0x04
#define RCB_REG_BD_NUM 0x08
#define RCB_REG_BD_LEN 0x0C
#define RCB_REG_PKTLINE 0x10
#define RCB_REG_TAIL 0x18
#define RCB_REG_HEAD 0x1C
#define RCB_REG_FBDNUM 0x20
#define RCB_REG_OFFSET 0x24 /* pkt num to be handled */
#define RCB_REG_PKTNUM_RECORD 0x2C /* total pkt received */

#define HNS_RX_HEAD_SIZE 256

#define HNAE_AE_REGISTER 0x1

#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)

#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
#define HNAE_LOW_LATENCY_COAL_PARAM 80
#define HNAE_BULK_LATENCY_COAL_PARAM 150

enum hnae_led_state {
	HNAE_LED_INACTIVE,
	HNAE_LED_ACTIVE,
	HNAE_LED_ON,
	HNAE_LED_OFF
};

#define HNS_RX_FLAG_VLAN_PRESENT 0x1
#define HNS_RX_FLAG_L3ID_IPV4 0x0
#define HNS_RX_FLAG_L3ID_IPV6 0x1
#define HNS_RX_FLAG_L4ID_UDP 0x0
#define HNS_RX_FLAG_L4ID_TCP 0x1
#define HNS_RX_FLAG_L4ID_SCTP 0x3

#define HNS_TXD_ASID_S 0
#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
#define HNS_TXD_BUFNUM_S 8
#define HNS_TXD_BUFNUM_M (0x3 << HNS_TXD_BUFNUM_S)
#define HNS_TXD_PORTID_S 10
#define HNS_TXD_PORTID_M (0x7 << HNS_TXD_PORTID_S)

#define HNS_TXD_RA_B 8
#define HNS_TXD_RI_B 9
#define HNS_TXD_L4CS_B 10
#define HNS_TXD_L3CS_B 11
#define HNS_TXD_FE_B 12
#define HNS_TXD_VLD_B 13
#define HNS_TXD_IPOFFSET_S 14
#define HNS_TXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)

#define HNS_RXD_IPOFFSET_S 0
#define HNS_RXD_IPOFFSET_M (0xff << HNS_RXD_IPOFFSET_S)
#define HNS_RXD_BUFNUM_S 8
#define HNS_RXD_BUFNUM_M (0x3 << HNS_RXD_BUFNUM_S)
#define HNS_RXD_PORTID_S 10
#define HNS_RXD_PORTID_M (0x7 << HNS_RXD_PORTID_S)
#define HNS_RXD_DMAC_S 13
#define HNS_RXD_DMAC_M (0x3 << HNS_RXD_DMAC_S)
#define HNS_RXD_VLAN_S 15
#define HNS_RXD_VLAN_M (0x3 << HNS_RXD_VLAN_S)
#define HNS_RXD_L3ID_S 17
#define HNS_RXD_L3ID_M (0xf << HNS_RXD_L3ID_S)
#define HNS_RXD_L4ID_S 21
#define HNS_RXD_L4ID_M (0xf << HNS_RXD_L4ID_S)
#define HNS_RXD_FE_B 25
#define HNS_RXD_FRAG_B 26
#define HNS_RXD_VLD_B 27
#define HNS_RXD_L2E_B 28
#define HNS_RXD_L3E_B 29
#define HNS_RXD_L4E_B 30
#define HNS_RXD_DROP_B 31

#define HNS_RXD_VLANID_S 8
#define HNS_RXD_VLANID_M (0xfff << HNS_RXD_VLANID_S)
#define HNS_RXD_CFI_B 20
#define HNS_RXD_PRI_S 21
#define HNS_RXD_PRI_M (0x7 << HNS_RXD_PRI_S)
#define HNS_RXD_ASID_S 24
#define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S)

#define HNSV2_TXD_BUFNUM_S 0
#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S)
#define HNSV2_TXD_PORTID_S 4
#define HNSV2_TXD_PORTID_M (0x7 << HNSV2_TXD_PORTID_S)
#define HNSV2_TXD_RI_B 1
#define HNSV2_TXD_L4CS_B 2
#define HNSV2_TXD_L3CS_B 3
#define HNSV2_TXD_FE_B 4
#define HNSV2_TXD_VLD_B 5

#define HNSV2_TXD_TSE_B 0
#define HNSV2_TXD_VLAN_EN_B 1
#define HNSV2_TXD_SNAP_B 2
#define HNSV2_TXD_IPV6_B 3
#define HNSV2_TXD_SCTP_B 4

/* hardware spec ring buffer format */
struct __packed hnae_desc {
	__le64 addr;
	union {
		struct {
			union {
				__le16 asid_bufnum_pid;
				__le16 asid;
			};
			__le16 send_size;
			union {
				__le32 flag_ipoffset;
				struct {
					__u8 bn_pid;
					__u8 ra_ri_cs_fe_vld;
					__u8 ip_offset;
					__u8 tse_vlan_snap_v6_sctp_nth;
				};
			};
			__le16 mss;
			__u8 l4_len;
			__u8 reserved1;
			__le16 paylen;
			__u8 vmid;
			__u8 qid;
			__le32 reserved2[2];
		} tx;

		struct {
			__le32 ipoff_bnum_pid_flag;
			__le16 pkt_len;
			__le16 size;
			union {
				__le32 vlan_pri_asid;
				struct {
					__le16 asid;
					__le16 vlan_cfi_pri;
				};
			};
			__le32 rss_hash;
			__le32 reserved_1[2];
		} rx;
	};
};

struct hnae_desc_cb {
	dma_addr_t dma; /* dma address of this desc */
	void *buf; /* cpu addr for a desc */

	/* priv data for the desc, e.g. skb when used with the IP stack */
	void *priv;
	u32 page_offset;
	u32 length; /* length of the buffer */

	u16 reuse_flag;

	/* desc type, used by the ring user to mark the type of the priv data */
	u16 type;
};

#define setflags(flags, bits) ((flags) |= (bits))
#define unsetflags(flags, bits) ((flags) &= ~(bits))

/* hnae_ring->flags fields */
#define RINGF_DIR 0x1 /* TX or RX ring, set if TX */
#define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
#define is_rx_ring(ring) (!is_tx_ring(ring))
#define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)
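
/* Usage sketch (illustration only): the direction flag selects the DMA
 * direction used when mapping a ring buffer, e.g.
 *
 *	dma_map_single(ring_to_dev(ring), cb->buf, cb->length,
 *		       ring_to_dma_dir(ring));
 *
 * ring_to_dev() is defined later in this header.
 */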

struct ring_stats {
	u64 io_err_cnt;
	u64 sw_err_cnt;
	u64 seg_pkt_cnt;
	union {
		struct {
			u64 tx_pkts;
			u64 tx_bytes;
			u64 tx_err_cnt;
			u64 restart_queue;
			u64 tx_busy;
		};
		struct {
			u64 rx_pkts;
			u64 rx_bytes;
			u64 rx_err_cnt;
			u64 reuse_pg_cnt;
			u64 err_pkt_len;
			u64 non_vld_descs;
			u64 err_bd_num;
			u64 l2_err;
			u64 l3l4_csum_err;
		};
	};
};

struct hnae_queue;

struct hnae_ring {
	u8 __iomem *io_base; /* base io address for the ring */
	struct hnae_desc *desc; /* dma map address space */
	struct hnae_desc_cb *desc_cb;
	struct hnae_queue *q;
	int irq;
	char ring_name[RCB_RING_NAME_LEN];

	/* statistic */
	struct ring_stats stats;

	dma_addr_t desc_dma_addr;
	u32 buf_size; /* size for hnae_desc->addr, preset by AE */
	u16 desc_num; /* total number of desc */
	u16 max_desc_num_per_pkt;
	u16 max_raw_data_sz_per_desc;
	u16 max_pkt_size;
	int next_to_use; /* idx of next spare desc */

	/* idx of the latest sent desc; the ring is empty when it equals
	 * next_to_use
	 */
	int next_to_clean;

	int flags; /* ring attribute */
	int irq_init_flag;

	/* total rx bytes since the rx rate was last calculated */
	u64 coal_last_rx_bytes;
	unsigned long coal_last_jiffies;
	u32 coal_param;
	u32 coal_rx_rate; /* rx rate in MB */
};

#define ring_ptr_move_fw(ring, p) \
	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
	((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
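
/* Example (illustration only): a TX path fills the descriptor at next_to_use
 * and then advances the producer index with wrap-around:
 *
 *	ring->desc[ring->next_to_use] = *desc;
 *	ring_ptr_move_fw(ring, next_to_use);
 *
 * The cleanup path walks next_to_clean forward in the same way.
 */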

enum hns_desc_type {
	DESC_TYPE_SKB,
	DESC_TYPE_PAGE,
};

#define assert_is_ring_idx(ring, idx) \
	assert((idx) >= 0 && (idx) < (ring)->desc_num)

/* the distance between [begin, end) in a ring buffer
 * note: there is an unused slot between begin and end
 */
static inline int ring_dist(struct hnae_ring *ring, int begin, int end)
{
	assert_is_ring_idx(ring, begin);
	assert_is_ring_idx(ring, end);

	return (end - begin + ring->desc_num) % ring->desc_num;
}

static inline int ring_space(struct hnae_ring *ring)
{
	return ring->desc_num -
		ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
}
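
/* Worked example: with desc_num = 8, next_to_clean = 2 and next_to_use = 6,
 * ring_dist() = (6 - 2 + 8) % 8 = 4 descs are in flight and ring_space()
 * = 8 - 4 - 1 = 3 descs are free; one slot is always left unused so a full
 * ring can be told apart from an empty one.
 */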

static inline int is_ring_empty(struct hnae_ring *ring)
{
	assert_is_ring_idx(ring, ring->next_to_use);
	assert_is_ring_idx(ring, ring->next_to_clean);

	return ring->next_to_use == ring->next_to_clean;
}

#define hnae_buf_size(_ring) ((_ring)->buf_size)
#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
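
/* Example: with 4K pages, buf_size = 2048 gives hnae_page_order() = 0 and
 * hnae_page_size() = 4096, while buf_size = 8192 would give order 1 and an
 * 8192-byte (two-page) allocation.
 */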

struct hnae_handle;

/* allocate and dma map space for hnae desc */
struct hnae_buf_ops {
	int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
	void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
	int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
	void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
};
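
/* A minimal sketch of a buffer-ops implementation (hypothetical names, for
 * illustration only): alloc_buffer is typically backed by a page allocation
 * and map_buffer by a streaming DMA mapping, e.g.
 *
 *	static int my_alloc_buffer(struct hnae_ring *ring,
 *				   struct hnae_desc_cb *cb)
 *	{
 *		struct page *p = dev_alloc_pages(hnae_page_order(ring));
 *
 *		if (!p)
 *			return -ENOMEM;
 *		cb->priv = p;
 *		cb->buf = page_address(p);
 *		cb->length = hnae_buf_size(ring);
 *		return 0;
 *	}
 *
 * map_buffer would then call dma_map_single(ring_to_dev(ring), cb->buf,
 * cb->length, ring_to_dma_dir(ring)) and store the handle in cb->dma.
 */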

struct hnae_queue {
	u8 __iomem *io_base;
	phys_addr_t phy_base;
	struct hnae_ae_dev *dev; /* the device that uses this queue */
	struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
	struct hnae_ring tx_ring ____cacheline_internodealigned_in_smp;
	struct hnae_handle *handle;
};

/* hnae loop mode */
enum hnae_loop {
	MAC_INTERNALLOOP_MAC = 0,
	MAC_INTERNALLOOP_SERDES,
	MAC_INTERNALLOOP_PHY,
	MAC_LOOP_PHY_NONE,
	MAC_LOOP_NONE,
};

/* hnae port type */
enum hnae_port_type {
	HNAE_PORT_SERVICE = 0,
	HNAE_PORT_DEBUG
};

/* mac media type */
enum hnae_media_type {
	HNAE_MEDIA_TYPE_UNKNOWN = 0,
	HNAE_MEDIA_TYPE_FIBER,
	HNAE_MEDIA_TYPE_COPPER,
	HNAE_MEDIA_TYPE_BACKPLANE,
};

/* This struct defines the operation on the handle.
 *
 * get_handle(): (mandatory)
 *   Get a handle from the AE according to its name and options.
 *   The AE driver should manage the space used by the handle and its queues,
 *   while the HNAE framework will allocate desc and desc_cb for all rings in
 *   the queues.
 * put_handle():
 *   Release the handle.
 * start():
 *   Enable the hardware, including all queues
 * stop():
 *   Disable the hardware
 * set_opts(): (mandatory)
 *   Set options to the AE
 * get_opts(): (mandatory)
 *   Get options from the AE
 * get_status():
 *   Get the carrier state of the back channel of the handle, 1 for ok, 0 for
 *   not ok
 * toggle_ring_irq(): (mandatory)
 *   Set the ring irq to be enabled (0) or disabled (1)
 * toggle_queue_status(): (mandatory)
 *   Set the queue to be enabled (1) or disabled (0); this will not change
 *   the ring irq state
 * adjust_link()
 *   adjust link status
 * set_loopback()
 *   set loopback
 * get_ring_bdnum_limit()
 *   get ring bd number limit
 * get_pauseparam()
 *   get whether tx and rx pause frames are enabled
 * set_pauseparam()
 *   enable or disable tx and rx pause frames
 * get_coalesce_usecs()
 *   get usecs to delay a TX interrupt after a packet is sent
 * get_max_coalesced_frames()
 *   get the maximum number of packets to be sent before a TX interrupt
 * set_coalesce_usecs()
 *   set usecs to delay a TX interrupt after a packet is sent
 * set_coalesce_frames()
 *   set the maximum number of packets to be sent before a TX interrupt
 * get_ringnum()
 *   get RX/TX ring number
 * get_max_ringnum()
 *   get RX/TX ring maximum number
 * get_mac_addr()
 *   get mac address
 * set_mac_addr()
 *   set mac address
 * clr_mc_addr()
 *   clear mcast tcam table
 * set_mc_addr()
 *   set multicast mode
 * add_uc_addr()
 *   add ucast address
 * rm_uc_addr()
 *   remove ucast address
 * set_mtu()
 *   set mtu
 * update_stats()
 *   update the old network device statistics
 * get_ethtool_stats()
 *   get ethtool network device statistics
 * get_strings()
 *   get a set of strings that describe the requested objects
 * get_sset_count()
 *   get the number of strings that @get_strings will write
 * update_led_status()
 *   update the led status
 * set_led_id()
 *   set led id
 * get_regs()
 *   get regs dump
 * get_regs_len()
 *   get the len of the regs dump
 */
struct hnae_ae_ops {
	struct hnae_handle *(*get_handle)(struct hnae_ae_dev *dev,
					  u32 port_id);
	void (*put_handle)(struct hnae_handle *handle);
	void (*init_queue)(struct hnae_queue *q);
	void (*fini_queue)(struct hnae_queue *q);
	int (*start)(struct hnae_handle *handle);
	void (*stop)(struct hnae_handle *handle);
	void (*reset)(struct hnae_handle *handle);
	int (*set_opts)(struct hnae_handle *handle, int type, void *opts);
	int (*get_opts)(struct hnae_handle *handle, int type, void **opts);
	int (*get_status)(struct hnae_handle *handle);
	int (*get_info)(struct hnae_handle *handle,
			u8 *auto_neg, u16 *speed, u8 *duplex);
	void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
	void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
	bool (*need_adjust_link)(struct hnae_handle *handle,
				 int speed, int duplex);
	int (*set_loopback)(struct hnae_handle *handle,
			    enum hnae_loop loop_mode, int en);
	void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
				     u32 *uplimit);
	void (*get_pauseparam)(struct hnae_handle *handle,
			       u32 *auto_neg, u32 *rx_en, u32 *tx_en);
	int (*set_pauseparam)(struct hnae_handle *handle,
			      u32 auto_neg, u32 rx_en, u32 tx_en);
	void (*get_coalesce_usecs)(struct hnae_handle *handle,
				   u32 *tx_usecs, u32 *rx_usecs);
	void (*get_max_coalesced_frames)(struct hnae_handle *handle,
					 u32 *tx_frames, u32 *rx_frames);
	int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
	int (*set_coalesce_frames)(struct hnae_handle *handle,
				   u32 tx_frames, u32 rx_frames);
	void (*get_coalesce_range)(struct hnae_handle *handle,
				   u32 *tx_frames_low, u32 *rx_frames_low,
				   u32 *tx_frames_high, u32 *rx_frames_high,
				   u32 *tx_usecs_low, u32 *rx_usecs_low,
				   u32 *tx_usecs_high, u32 *rx_usecs_high);
	void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
	int (*get_mac_addr)(struct hnae_handle *handle, void **p);
	int (*set_mac_addr)(struct hnae_handle *handle, const void *p);
	int (*add_uc_addr)(struct hnae_handle *handle,
			   const unsigned char *addr);
	int (*rm_uc_addr)(struct hnae_handle *handle,
			  const unsigned char *addr);
	int (*clr_mc_addr)(struct hnae_handle *handle);
	int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
	int (*set_mtu)(struct hnae_handle *handle, int new_mtu);
	void (*set_tso_stats)(struct hnae_handle *handle, int enable);
	void (*update_stats)(struct hnae_handle *handle,
			     struct net_device_stats *net_stats);
	void (*get_stats)(struct hnae_handle *handle, u64 *data);
	void (*get_strings)(struct hnae_handle *handle,
			    u32 stringset, u8 *data);
	int (*get_sset_count)(struct hnae_handle *handle, int stringset);
	void (*update_led_status)(struct hnae_handle *handle);
	int (*set_led_id)(struct hnae_handle *handle,
			  enum hnae_led_state status);
	void (*get_regs)(struct hnae_handle *handle, void *data);
	int (*get_regs_len)(struct hnae_handle *handle);
	u32 (*get_rss_key_size)(struct hnae_handle *handle);
	u32 (*get_rss_indir_size)(struct hnae_handle *handle);
	int (*get_rss)(struct hnae_handle *handle, u32 *indir, u8 *key,
		       u8 *hfunc);
	int (*set_rss)(struct hnae_handle *handle, const u32 *indir,
		       const u8 *key, const u8 hfunc);
};

struct hnae_ae_dev {
	struct device cls_dev; /* the class dev */
	struct device *dev; /* the presented dev */
	struct hnae_ae_ops *ops;
	struct list_head node;
	struct module *owner; /* the module that provides this dev */
	int id;
	char name[AE_NAME_SIZE];
	struct list_head handle_list;
	spinlock_t lock; /* lock to protect the handle_list */
};

struct hnae_handle {
	struct device *owner_dev; /* the device that makes use of this handle */
	struct hnae_ae_dev *dev; /* the device that provides this handle */
	struct phy_device *phy_dev;
	phy_interface_t phy_if;
	u32 if_support;
	int q_num;
	int vf_id;
	unsigned long coal_last_jiffies;
	u32 coal_param; /* self adapt coalesce param */
	/* the ring index of last ring that set coal param */
	u32 coal_ring_idx;
	u32 eport_id;
	u32 dport_id; /* v2 tx bd should fill the dport_id */
	bool coal_adapt_en;
	enum hnae_port_type port_type;
	enum hnae_media_type media_type;
	struct list_head node; /* list to hnae_ae_dev->handle_list */
	struct hnae_buf_ops *bops; /* operations for the buffers */
	struct hnae_queue *qs[]; /* flexible array of all queues */
};

#define ring_to_dev(ring) ((ring)->q->dev->dev)

struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops);
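
/* Usage sketch (illustration only, hypothetical variable names): the ENET
 * (netdev) driver looks a handle up by the fwnode of its AE device and a
 * port id, passing its buffer ops (or NULL for the framework defaults):
 *
 *	handle = hnae_get_handle(&netdev->dev, ae_fwnode, port_id, NULL);
 *	if (IS_ERR_OR_NULL(handle))
 *		return -ENODEV;
 */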

void hnae_put_handle(struct hnae_handle *handle);
int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
void hnae_ae_unregister(struct hnae_ae_dev *dev);
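
/* Registration sketch (hypothetical names, for illustration only): an AE
 * driver fills in an hnae_ae_dev with its ops table and hands it to the
 * framework on probe, e.g.
 *
 *	static struct hnae_ae_ops my_ae_ops = {
 *		.get_handle      = my_get_handle,
 *		.set_opts        = my_set_opts,
 *		.get_opts        = my_get_opts,
 *		.toggle_ring_irq = my_toggle_ring_irq,
 *	};
 *
 *	ae_dev->dev = &pdev->dev;
 *	ae_dev->ops = &my_ae_ops;
 *	ret = hnae_ae_register(ae_dev, THIS_MODULE);
 *
 * hnae_ae_unregister() undoes this on remove.
 */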

int hnae_register_notifier(struct notifier_block *nb);
void hnae_unregister_notifier(struct notifier_block *nb);
int hnae_reinit_handle(struct hnae_handle *handle);

#define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
	(q)->tx_ring.io_base + RCB_REG_TAIL)
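
/* Example (illustration only): after filling buf_num descriptors for one
 * packet, the ENET driver rings the TX doorbell with
 *
 *	hnae_queue_xmit(ring->q, buf_num);
 *
 * which writes the buffer count to the ring's TAIL register.
 */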

#ifndef assert
#define assert(cond)
#endif

static inline int hnae_reserve_buffer_map(struct hnae_ring *ring,
					  struct hnae_desc_cb *cb)
{
	struct hnae_buf_ops *bops = ring->q->handle->bops;
	int ret;

	ret = bops->alloc_buffer(ring, cb);
	if (ret)
		goto out;

	ret = bops->map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	bops->free_buffer(ring, cb);
out:
	return ret;
}

static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
{
	int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}

static inline void hnae_buffer_detach(struct hnae_ring *ring, int i)
{
	ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc[i].addr = 0;
}

static inline void hnae_free_buffer_detach(struct hnae_ring *ring, int i)
{
	struct hnae_buf_ops *bops = ring->q->handle->bops;
	struct hnae_desc_cb *cb = &ring->desc_cb[i];

	if (!ring->desc_cb[i].dma)
		return;

	hnae_buffer_detach(ring, i);
	bops->free_buffer(ring, cb);
}

/* detach an in-use buffer and replace it with a reserved one */
static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
				       struct hnae_desc_cb *res_cb)
{
	struct hnae_buf_ops *bops = ring->q->handle->bops;

	bops->unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
	ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}

static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
		+ ring->desc_cb[i].page_offset);
	ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}
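
/* Illustration of how the RX cleanup path is expected to use the two helpers
 * above: when a received page can be flipped and handed back to hardware,
 * the driver sets desc_cb[i].reuse_flag and calls hnae_reuse_buffer();
 * otherwise it reserves a fresh buffer and calls hnae_replace_buffer().
 */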

/* when the buffer size is changed, reinit the buffer descriptions */
static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h)
{
	int i, j;
	struct hnae_ring *ring;

	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		for (j = 0; j < ring->desc_num; j++)
			ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma);
	}

	wmb(); /* commit all data before submit */
}

/* when the buffer size is changed, reinit the page offsets */
static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h)
{
	int i, j;
	struct hnae_ring *ring;

	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		for (j = 0; j < ring->desc_num; j++) {
			ring->desc_cb[j].page_offset = 0;
			if (ring->desc[j].addr !=
			    cpu_to_le64(ring->desc_cb[j].dma))
				ring->desc[j].addr =
					cpu_to_le64(ring->desc_cb[j].dma);
		}
	}

	wmb(); /* commit all data before submit */
}

#define hnae_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)

#define hnae_set_bit(origin, shift, val) \
	hnae_set_field((origin), (0x1 << (shift)), (shift), (val))

#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

#define hnae_get_bit(origin, shift) \
	hnae_get_field((origin), (0x1 << (shift)), (shift))
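
/* Example (illustration only): packing and reading the port id bits of a v1
 * TX descriptor's asid_bufnum_pid field:
 *
 *	u16 bn_pid = 0;
 *
 *	hnae_set_field(bn_pid, HNS_TXD_PORTID_M, HNS_TXD_PORTID_S, port_id);
 *	port_id = hnae_get_field(bn_pid, HNS_TXD_PORTID_M, HNS_TXD_PORTID_S);
 *
 * hnae_set_bit()/hnae_get_bit() work the same way for single-bit flags such
 * as HNS_TXD_VLD_B in flag_ipoffset.
 */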

#endif