/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */

#ifndef _NFP_NET_DP_
#define _NFP_NET_DP_

#include "nfp_net.h"

static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
	return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
				    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_sync_single_for_device(dp->dev, dma_addr,
				   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				   dp->rx_dma_dir);
}

static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
					dma_addr_t dma_addr)
{
	dma_unmap_single_attrs(dp->dev, dma_addr,
			       dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
			       dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
					   dma_addr_t dma_addr,
					   unsigned int len)
{
	dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
				len, dp->rx_dma_dir);
}
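
/*
 * Illustrative sketch only, not part of the driver API: how a refill
 * path might pair nfp_net_dma_map_rx() with an explicit device sync.
 * Because the mapping is created with DMA_ATTR_SKIP_CPU_SYNC, the
 * buffer must be synced for the device before it is handed to the
 * freelist. The helper name and -ENOMEM convention are assumptions.
 */
static inline int
nfp_net_example_map_rx_frag(struct nfp_net_dp *dp, void *frag,
			    dma_addr_t *dma_addr)
{
	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr))
		return -ENOMEM;

	/* Make the whole buffer device-visible before publishing it. */
	nfp_net_dma_sync_dev_rx(dp, *dma_addr);
	return 0;
}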

/**
 * nfp_net_tx_full() - check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of the read/write
 * pointers, whether a given TX ring is full. The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}
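
/*
 * Illustrative sketch, not driver code: a transmit path would normally
 * consult nfp_net_tx_full() before writing descriptors and stop the
 * queue when the host-side view says there is no room. The helper name
 * and the needed_descs parameter are assumptions for illustration.
 */
static inline bool
nfp_net_example_may_xmit(struct nfp_net_tx_ring *tx_ring, int needed_descs)
{
	/* Conservative check: the device may already have retired more
	 * descriptors than the host copies of rd_p/wr_p reflect.
	 */
	return !nfp_net_tx_full(tx_ring, needed_descs);
}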

static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	wmb(); /* drain writebuffer */
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
	tx_ring->wr_ptr_add = 0;
}
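
/*
 * Illustrative sketch, not driver code: wr_ptr_add accumulates the
 * descriptors queued since the last doorbell, so under xmit_more the
 * QCP write pointer is only advanced once per batch. The xmit_more
 * flag would come from netdev_xmit_more() in a real ndo_start_xmit.
 */
static inline void
nfp_net_example_kick_tx(struct nfp_net_tx_ring *tx_ring, bool xmit_more)
{
	/* Ring the doorbell only when the stack has no more frames
	 * pending for this queue.
	 */
	if (!xmit_more)
		nfp_net_tx_xmit_more_flush(tx_ring);
}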

static inline u32
nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
{
	if (tx_ring->txrwb)
		return *tx_ring->txrwb;
	return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
}
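
/*
 * Illustrative sketch, not driver code: a completion handler might use
 * nfp_net_read_tx_cmpl() to work out how many descriptors completed
 * since the last poll. Assumes the ring keeps a cached copy of the
 * hardware read pointer in qcp_rd_p.
 */
static inline u32
nfp_net_example_tx_completed(struct nfp_net_tx_ring *tx_ring,
			     struct nfp_net_dp *dp)
{
	u32 qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);

	/* Pointers are free-running; unsigned wrap-around subtraction
	 * yields the number of newly completed descriptors.
	 */
	return qcp_rd_p - tx_ring->qcp_rd_p;
}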

static inline void nfp_net_free_frag(void *frag, bool xdp)
{
	if (!xdp)
		skb_free_frag(frag);
	else
		__free_page(virt_to_page(frag));
}

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */
static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
	nn_pci_flush(nn);
}
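
/*
 * Illustrative sketch, not driver code: with automasked MSI-X vectors
 * the interrupt stays masked until software re-arms it, so the unmask
 * naturally lives at the tail of a NAPI poll once the budget was not
 * exhausted. The helper name and parameters are assumptions.
 */
static inline void
nfp_net_example_poll_done(struct nfp_net *nn, struct napi_struct *napi,
			  unsigned int entry_nr, int work_done, int budget)
{
	if (work_done < budget && napi_complete_done(napi, work_done))
		nfp_net_irq_unmask(nn, entry_nr);
}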

struct seq_file;

/* Common */
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx);
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx);
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);

void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
			const struct nfp_meta_parsed *meta);

enum nfp_nfd_version {
	NFP_NFD_VER_NFD3,
	NFP_NFD_VER_NFDK,
};

/**
 * struct nfp_dp_ops - Hooks wrapping the different datapath implementations
 * @version:		Datapath version (NFD3 or NFDK)
 * @tx_min_desc_per_pkt: Minimum number of TX descriptors needed per packet
 * @cap_mask:		Mask of supported features
 * @dma_mask:		DMA addressing capability
 * @poll:		NAPI poll for normal RX/TX
 * @xsk_poll:		NAPI poll when AF_XDP (xsk) is enabled
 * @ctrl_poll:		Tasklet poll for ctrl RX/TX
 * @xmit:		Xmit for normal path
 * @ctrl_tx_one:	Xmit for ctrl path
 * @rx_ring_fill_freelist: Give buffers from the ring to FW
 * @tx_ring_alloc:	Allocate resources for a TX ring
 * @tx_ring_reset:	Free any untransmitted buffers and reset pointers
 * @tx_ring_free:	Free resources allocated to a TX ring
 * @tx_ring_bufs_alloc: Allocate resources for each TX buffer
 * @tx_ring_bufs_free:	Free resources allocated to each TX buffer
 * @print_tx_descs:	Show the TX ring's state for debugging
 */
struct nfp_dp_ops {
	enum nfp_nfd_version version;
	unsigned int tx_min_desc_per_pkt;
	u32 cap_mask;
	u64 dma_mask;

	int (*poll)(struct napi_struct *napi, int budget);
	int (*xsk_poll)(struct napi_struct *napi, int budget);
	void (*ctrl_poll)(struct tasklet_struct *t);
	netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
	bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
			    struct sk_buff *skb, bool old);
	void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
				      struct nfp_net_rx_ring *rx_ring);
	int (*tx_ring_alloc)(struct nfp_net_dp *dp,
			     struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_reset)(struct nfp_net_dp *dp,
			      struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
	int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);

	void (*print_tx_descs)(struct seq_file *file,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p);
};

static inline void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_reset(dp, tx_ring);
}

static inline void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
			      struct nfp_net_rx_ring *rx_ring)
{
	dp->ops->rx_ring_fill_freelist(dp, rx_ring);
}

static inline int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	return dp->ops->tx_ring_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_free(tx_ring);
}

static inline int
nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_tx_ring *tx_ring)
{
	return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_bufs_free(dp, tx_ring);
}
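
/*
 * Illustrative sketch, not driver code: typical ordering and rollback
 * when bringing up a TX ring through the indirection layer above. The
 * wrapper functions used are real; the setup helper itself is an
 * assumption for illustration.
 */
static inline int
nfp_net_example_tx_ring_setup(struct nfp_net_dp *dp,
			      struct nfp_net_tx_ring *tx_ring)
{
	int err;

	err = nfp_net_tx_ring_alloc(dp, tx_ring);
	if (err)
		return err;

	/* Buffer allocation can fail independently; unwind the ring. */
	err = nfp_net_tx_ring_bufs_alloc(dp, tx_ring);
	if (err)
		nfp_net_tx_ring_free(dp, tx_ring);

	return err;
}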

static inline void
nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p)
{
	dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
}

extern const struct nfp_dp_ops nfp_nfd3_ops;
extern const struct nfp_dp_ops nfp_nfdk_ops;
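
/*
 * Illustrative sketch, not driver code: probe-time selection of the
 * ops table from the datapath the firmware exposes. The helper name
 * is an assumption; the two ops tables are the real ones above.
 */
static inline const struct nfp_dp_ops *
nfp_net_example_pick_dp_ops(enum nfp_nfd_version version)
{
	return version == NFP_NFD_VER_NFDK ? &nfp_nfdk_ops : &nfp_nfd3_ops;
}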

netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);

#endif /* _NFP_NET_DP_ */