1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. |
4 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. |
5 | */ |
6 | #ifndef _VNIC_RQ_H_ |
7 | #define _VNIC_RQ_H_ |
8 | |
9 | #include <linux/pci.h> |
10 | #include "vnic_dev.h" |
11 | #include "vnic_cq.h" |
12 | |
13 | /* |
14 | * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth |
15 | * Driver) when both are built with CONFIG options =y |
16 | */ |
17 | #define vnic_rq_desc_avail fnic_rq_desc_avail |
18 | #define vnic_rq_desc_used fnic_rq_desc_used |
19 | #define vnic_rq_next_desc fnic_rq_next_desc |
20 | #define vnic_rq_next_index fnic_rq_next_index |
21 | #define vnic_rq_next_buf_index fnic_rq_next_buf_index |
22 | #define vnic_rq_post fnic_rq_post |
23 | #define vnic_rq_posting_soon fnic_rq_posting_soon |
24 | #define vnic_rq_return_descs fnic_rq_return_descs |
25 | #define vnic_rq_service fnic_rq_service |
26 | #define vnic_rq_fill fnic_rq_fill |
27 | #define vnic_rq_free fnic_rq_free |
28 | #define vnic_rq_alloc fnic_rq_alloc |
29 | #define vnic_rq_init fnic_rq_init |
30 | #define vnic_rq_error_status fnic_rq_error_status |
31 | #define vnic_rq_enable fnic_rq_enable |
32 | #define vnic_rq_disable fnic_rq_disable |
33 | #define vnic_rq_clean fnic_rq_clean |
34 | |
35 | /* Receive queue control */ |
36 | struct vnic_rq_ctrl { |
37 | u64 ring_base; /* 0x00 */ |
38 | u32 ring_size; /* 0x08 */ |
39 | u32 pad0; |
40 | u32 posted_index; /* 0x10 */ |
41 | u32 pad1; |
42 | u32 cq_index; /* 0x18 */ |
43 | u32 pad2; |
44 | u32 enable; /* 0x20 */ |
45 | u32 pad3; |
46 | u32 running; /* 0x28 */ |
47 | u32 pad4; |
48 | u32 fetch_index; /* 0x30 */ |
49 | u32 pad5; |
50 | u32 error_interrupt_enable; /* 0x38 */ |
51 | u32 pad6; |
52 | u32 error_interrupt_offset; /* 0x40 */ |
53 | u32 pad7; |
54 | u32 error_status; /* 0x48 */ |
55 | u32 pad8; |
56 | u32 dropped_packet_count; /* 0x50 */ |
57 | u32 pad9; |
58 | u32 dropped_packet_count_rc; /* 0x58 */ |
59 | u32 pad10; |
60 | }; |
61 | |
62 | /* Break the vnic_rq_buf allocations into blocks of 64 entries */ |
63 | #define VNIC_RQ_BUF_BLK_ENTRIES 64 |
64 | #define VNIC_RQ_BUF_BLK_SZ \ |
65 | (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf)) |
66 | #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ |
67 | DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES) |
68 | #define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) |
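
/*
 * For example, a 512-entry ring needs VNIC_RQ_BUF_BLKS_NEEDED(512) = 8
 * blocks of 64 entries each, and the 4096-entry ceiling caps the bufs[]
 * array below at 64 block pointers.
 */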
69 | |
struct vnic_rq_buf {
	struct vnic_rq_buf *next;	/* circular link through the blocks */
	dma_addr_t dma_addr;		/* DMA address of the posted buffer */
	void *os_buf;			/* opaque OS-level buffer pointer */
	unsigned int os_buf_index;	/* caller-assigned buffer index */
	unsigned int len;		/* posted buffer length in bytes */
	unsigned int index;		/* descriptor index within the ring */
	void *desc;			/* pointer into the descriptor ring */
};
79 | |
struct vnic_rq {
	unsigned int index;			/* queue index within the vnic */
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped control regs */
	struct vnic_dev_ring ring;		/* descriptor ring */
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;		/* next buf to post to HW */
	struct vnic_rq_buf *to_clean;		/* oldest posted buf to service */
	void *os_buf_head;
	unsigned int buf_index;
	unsigned int pkts_outstanding;
};
92 | |
93 | static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) |
94 | { |
95 | /* how many does SW own? */ |
96 | return rq->ring.desc_avail; |
97 | } |
98 | |
99 | static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) |
100 | { |
	/* how many does HW own?  (one slot is always left unposted so
	 * posted_index never catches up to fetch_index, hence the -1)
	 */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
103 | } |
104 | |
105 | static inline void *vnic_rq_next_desc(struct vnic_rq *rq) |
106 | { |
107 | return rq->to_use->desc; |
108 | } |
109 | |
110 | static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) |
111 | { |
112 | return rq->to_use->index; |
113 | } |
114 | |
115 | static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) |
116 | { |
117 | return rq->buf_index++; |
118 | } |
119 | |
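/*
 * Hand one DMA-mapped buffer to the queue.  The caller is expected to
 * have written the matching descriptor (obtained via
 * vnic_rq_next_desc()) before calling this, since the posted_index
 * write below is what makes the descriptor visible to hardware.
 */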
120 | static inline void vnic_rq_post(struct vnic_rq *rq, |
121 | void *os_buf, unsigned int os_buf_index, |
122 | dma_addr_t dma_addr, unsigned int len) |
123 | { |
124 | struct vnic_rq_buf *buf = rq->to_use; |
125 | |
126 | buf->os_buf = os_buf; |
127 | buf->os_buf_index = os_buf_index; |
128 | buf->dma_addr = dma_addr; |
129 | buf->len = len; |
130 | |
131 | buf = buf->next; |
132 | rq->to_use = buf; |
133 | rq->ring.desc_avail--; |
134 | |
	/* Move the posted_index every nth descriptor to batch the
	 * relatively expensive doorbell (MMIO) writes.
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf	/* keep 2^n - 1 so it works as a mask */
#endif
141 | |
142 | if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { |
		/* A write memory barrier prevents compiler and/or CPU
		 * reordering, so the descriptor is fully initialized
		 * before it is posted.  Otherwise, hardware could read
		 * stale descriptor fields.
		 */
148 | wmb(); |
149 | iowrite32(buf->index, &rq->ctrl->posted_index); |
150 | } |
151 | } |
152 | |
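/*
 * Nonzero when the descriptor at to_use sits on a return-rate boundary,
 * i.e. its index masked with VNIC_RQ_RETURN_RATE is zero.
 */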
153 | static inline int vnic_rq_posting_soon(struct vnic_rq *rq) |
154 | { |
155 | return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0; |
156 | } |
157 | |
158 | static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) |
159 | { |
160 | rq->ring.desc_avail += count; |
161 | } |
162 | |
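/*
 * Tells vnic_rq_service() whether to return each serviced descriptor to
 * the available pool immediately (VNIC_RQ_RETURN_DESC) or to leave that
 * to the caller, typically via vnic_rq_return_descs()
 * (VNIC_RQ_DEFER_RETURN_DESC).
 */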
163 | enum desc_return_options { |
164 | VNIC_RQ_RETURN_DESC, |
165 | VNIC_RQ_DEFER_RETURN_DESC, |
166 | }; |
167 | |
168 | static inline void vnic_rq_service(struct vnic_rq *rq, |
169 | struct cq_desc *cq_desc, u16 completed_index, |
170 | int desc_return, void (*buf_service)(struct vnic_rq *rq, |
171 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, |
172 | int skipped, void *opaque), void *opaque) |
173 | { |
174 | struct vnic_rq_buf *buf; |
175 | int skipped; |
176 | |
177 | buf = rq->to_clean; |
178 | while (1) { |
179 | |
180 | skipped = (buf->index != completed_index); |
181 | |
182 | (*buf_service)(rq, cq_desc, buf, skipped, opaque); |
183 | |
184 | if (desc_return == VNIC_RQ_RETURN_DESC) |
185 | rq->ring.desc_avail++; |
186 | |
187 | rq->to_clean = buf->next; |
188 | |
189 | if (!skipped) |
190 | break; |
191 | |
192 | buf = rq->to_clean; |
193 | } |
194 | } |
195 | |
196 | static inline int vnic_rq_fill(struct vnic_rq *rq, |
197 | int (*buf_fill)(struct vnic_rq *rq)) |
198 | { |
199 | int err; |
200 | |
	/* leave at least one descriptor unposted; see the -1 in
	 * vnic_rq_desc_used() above
	 */
	while (vnic_rq_desc_avail(rq) > 1) {
202 | |
203 | err = (*buf_fill)(rq); |
204 | if (err) |
205 | return err; |
206 | } |
207 | |
208 | return 0; |
209 | } |
210 | |
211 | void vnic_rq_free(struct vnic_rq *rq); |
212 | int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, |
213 | unsigned int desc_count, unsigned int desc_size); |
214 | void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, |
215 | unsigned int error_interrupt_enable, |
216 | unsigned int error_interrupt_offset); |
217 | unsigned int vnic_rq_error_status(struct vnic_rq *rq); |
218 | void vnic_rq_enable(struct vnic_rq *rq); |
219 | int vnic_rq_disable(struct vnic_rq *rq); |
220 | void vnic_rq_clean(struct vnic_rq *rq, |
221 | void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)); |
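
/*
 * Typical queue lifecycle, as a sketch of the expected call order
 * (my_rq_fill_one and my_rq_buf_clean are illustrative callbacks):
 *
 *	vnic_rq_alloc(vdev, &rq, 0, desc_count, desc_size);
 *	vnic_rq_init(&rq, cq_index, err_intr_enable, err_intr_offset);
 *	vnic_rq_fill(&rq, my_rq_fill_one);
 *	vnic_rq_enable(&rq);
 *	...
 *	if (!vnic_rq_disable(&rq))
 *		vnic_rq_clean(&rq, my_rq_buf_clean);
 *	vnic_rq_free(&rq);
 */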
222 | |
223 | #endif /* _VNIC_RQ_H_ */ |
224 | |