1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. |
4 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. |
5 | */ |
6 | |
7 | #ifndef _VNIC_WQ_H_ |
8 | #define _VNIC_WQ_H_ |
9 | |
10 | #include <linux/pci.h> |
11 | |
12 | #include "vnic_dev.h" |
13 | #include "vnic_cq.h" |
14 | |
/* Work queue control -- mirrors the device's per-WQ register block.
 * Each 32-bit register occupies an 8-byte slot; the padN members keep
 * the next register on the following 0x8 boundary, matching the byte
 * offsets noted on the right.  Accessed only through an __iomem
 * mapping (see struct vnic_wq below) -- never as plain memory.
 */
struct vnic_wq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 dca_value;			/* 0x38 */
	u32 pad6;
	u32 error_interrupt_enable;	/* 0x40 */
	u32 pad7;
	u32 error_interrupt_offset;	/* 0x48 */
	u32 pad8;
	u32 error_status;		/* 0x50 */
	u32 pad9;
};
39 | |
/* Per-descriptor software bookkeeping entry.  Entries are chained via
 * next/prev in parallel with the descriptor ring; the post and service
 * paths below follow ->next unconditionally, so the list is presumably
 * circular (allocation code not visible here -- see vnic_wq_alloc()).
 */
struct vnic_wq_buf {
	struct vnic_wq_buf *next;	/* following entry in the chain */
	dma_addr_t dma_addr;		/* DMA address of the posted fragment */
	void *os_buf;			/* upper-layer buffer cookie; set only on EOP */
	unsigned int len;		/* fragment length in bytes */
	unsigned int index;		/* position within the descriptor ring */
	int sop;			/* start-of-packet flag */
	void *desc;			/* pointer into the descriptor ring itself */
	uint64_t wr_id;			/* Cookie */
	uint8_t cq_entry;		/* Gets completion event from hw */
	uint8_t desc_skip_cnt;		/* Num descs to occupy */
	uint8_t compressed_send;	/* Both hdr and payload in one desc */
	struct vnic_wq_buf *prev;	/* preceding entry in the chain */
};
54 | |
/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
/* Entries per allocation block: use the small block for rings smaller
 * than the default block size, the default block otherwise.  The
 * argument is parenthesized so the macro stays safe when handed a
 * non-trivial expression.
 */
#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)(((entries) < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
/* Size in bytes of one bookkeeping block */
#define VNIC_WQ_BUF_BLK_SZ(entries) \
	(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
/* Blocks required to cover a ring with the given entry count
 * (DIV_ROUND_UP parenthesizes its own arguments)
 */
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
66 | |
/* Software state for one work queue */
struct vnic_wq {
	unsigned int index;		/* WQ number within the vNIC */
	struct vnic_dev *vdev;		/* owning device */
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;	/* descriptor ring backing store */
	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];	/* bookkeeping blocks */
	struct vnic_wq_buf *to_use;	/* next entry to post (SW head) */
	struct vnic_wq_buf *to_clean;	/* oldest posted entry (SW tail) */
	unsigned int pkts_outstanding;	/* not updated here; presumably maintained by callers -- verify */
};
77 | |
/* State for the devcmd2 firmware command channel, which issues device
 * commands through a dedicated work queue.  Field semantics are defined
 * by the devcmd2 implementation (not visible in this header); comments
 * below note only what the names and types establish.
 */
struct devcmd2_controller {
	struct vnic_wq_ctrl __iomem *wq_ctrl;	/* register block of the devcmd2 WQ */
	struct vnic_devcmd2 *cmd_ring;		/* command descriptor ring */
	struct devcmd2_result *result;		/* completion/result entries */
	u16 next_result;			/* index of next result to consume */
	u16 result_size;			/* number of result entries */
	int color;				/* presumably a ring-wrap phase bit -- verify in vnic_dev.c */
	struct vnic_dev_ring results_ring;	/* backing store for results */
	struct vnic_wq wq;			/* the WQ used to post commands */
	u32 posted;				/* presumably last posted index -- verify in vnic_dev.c */
};
89 | |
90 | static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) |
91 | { |
92 | /* how many does SW own? */ |
93 | return wq->ring.desc_avail; |
94 | } |
95 | |
96 | static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) |
97 | { |
98 | /* how many does HW own? */ |
99 | return wq->ring.desc_count - wq->ring.desc_avail - 1; |
100 | } |
101 | |
102 | static inline void *vnic_wq_next_desc(struct vnic_wq *wq) |
103 | { |
104 | return wq->to_use->desc; |
105 | } |
106 | |
/* Ring the WQ doorbell: publish the current software post position
 * (wq->to_use->index, i.e. the index one past the last filled entry --
 * vnic_wq_post() advances to_use after writing) to the device.
 */
static inline void vnic_wq_doorbell(struct vnic_wq *wq)
{
	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
}
117 | |
118 | static inline void vnic_wq_post(struct vnic_wq *wq, |
119 | void *os_buf, dma_addr_t dma_addr, |
120 | unsigned int len, int sop, int eop, |
121 | uint8_t desc_skip_cnt, uint8_t cq_entry, |
122 | uint8_t compressed_send, uint64_t wrid) |
123 | { |
124 | struct vnic_wq_buf *buf = wq->to_use; |
125 | |
126 | buf->sop = sop; |
127 | buf->cq_entry = cq_entry; |
128 | buf->compressed_send = compressed_send; |
129 | buf->desc_skip_cnt = desc_skip_cnt; |
130 | buf->os_buf = eop ? os_buf : NULL; |
131 | buf->dma_addr = dma_addr; |
132 | buf->len = len; |
133 | buf->wr_id = wrid; |
134 | |
135 | buf = buf->next; |
136 | wq->to_use = buf; |
137 | |
138 | wq->ring.desc_avail -= desc_skip_cnt; |
139 | } |
140 | |
141 | static inline void vnic_wq_service(struct vnic_wq *wq, |
142 | struct cq_desc *cq_desc, u16 completed_index, |
143 | void (*buf_service)(struct vnic_wq *wq, |
144 | struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), |
145 | void *opaque) |
146 | { |
147 | struct vnic_wq_buf *buf; |
148 | |
149 | buf = wq->to_clean; |
150 | while (1) { |
151 | |
152 | (*buf_service)(wq, cq_desc, buf, opaque); |
153 | |
154 | wq->ring.desc_avail++; |
155 | |
156 | wq->to_clean = buf->next; |
157 | |
158 | if (buf->index == completed_index) |
159 | break; |
160 | |
161 | buf = wq->to_clean; |
162 | } |
163 | } |
164 | |
/* Implemented in the corresponding .c file; comments below note only
 * what the signatures establish -- see the implementations for exact
 * semantics.
 */

/* Release resources for a WQ (counterpart to vnic_wq_alloc()) */
void vnic_wq_free(struct vnic_wq *wq);
/* Allocate ring and bookkeeping for WQ 'index'; returns 0 or -errno */
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
/* Program CQ binding and error-interrupt registers for the WQ */
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
/* Read back the WQ's error_status register */
unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
/* Returns 0 on success; nonzero presumably if the WQ fails to quiesce */
int vnic_wq_disable(struct vnic_wq *wq);
/* Drain all posted buffers, invoking buf_clean() on each */
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
/* devcmd2 channel variants: allocate/start the devcmd2 WQ */
int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int desc_count, unsigned int desc_size);
void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
182 | |
183 | #endif /* _VNIC_WQ_H_ */ |
184 | |