1 | // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB |
2 | /* |
3 | * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved. |
4 | */ |
5 | |
6 | #include "ena_com.h" |
7 | |
8 | /*****************************************************************************/ |
9 | /*****************************************************************************/ |
10 | |
11 | /* Timeout in micro-sec */ |
12 | #define ADMIN_CMD_TIMEOUT_US (3000000) |
13 | |
14 | #define ENA_ASYNC_QUEUE_DEPTH 16 |
15 | #define ENA_ADMIN_QUEUE_DEPTH 32 |
16 | |
17 | |
18 | #define ENA_CTRL_MAJOR 0 |
19 | #define ENA_CTRL_MINOR 0 |
20 | #define ENA_CTRL_SUB_MINOR 1 |
21 | |
22 | #define MIN_ENA_CTRL_VER \ |
23 | (((ENA_CTRL_MAJOR) << \ |
24 | (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ |
25 | ((ENA_CTRL_MINOR) << \ |
26 | (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ |
27 | (ENA_CTRL_SUB_MINOR)) |
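
/* Illustrative only (editorial note): with the components defined above
 * (major 0, minor 0, sub-minor 1) the two shifted fields contribute
 * nothing, so MIN_ENA_CTRL_VER evaluates to 1, laid out in the same bit
 * positions as the CONTROLLER_VERSION register so a masked register read
 * can be compared against it directly.
 */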
28 | |
29 | #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) |
30 | #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) |
31 | |
32 | #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF |
33 | |
34 | #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 |
35 | |
36 | #define ENA_REGS_ADMIN_INTR_MASK 1 |
37 | |
38 | #define ENA_MAX_BACKOFF_DELAY_EXP 16U |
39 | |
40 | #define ENA_MIN_ADMIN_POLL_US 100 |
41 | |
42 | #define ENA_MAX_ADMIN_POLL_US 5000 |
43 | |
44 | /*****************************************************************************/ |
45 | /*****************************************************************************/ |
46 | /*****************************************************************************/ |
47 | |
48 | enum ena_cmd_status { |
49 | ENA_CMD_SUBMITTED, |
50 | ENA_CMD_COMPLETED, |
51 | /* Abort - canceled by the driver */ |
52 | ENA_CMD_ABORTED, |
53 | }; |
54 | |
55 | struct ena_comp_ctx { |
56 | struct completion wait_event; |
57 | struct ena_admin_acq_entry *user_cqe; |
58 | u32 comp_size; |
59 | enum ena_cmd_status status; |
60 | /* status from the device */ |
61 | u8 comp_status; |
62 | u8 cmd_opcode; |
63 | bool occupied; |
64 | }; |
65 | |
66 | struct ena_com_stats_ctx { |
67 | struct ena_admin_aq_get_stats_cmd get_cmd; |
68 | struct ena_admin_acq_get_stats_resp get_resp; |
69 | }; |
70 | |
71 | static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, |
72 | struct ena_common_mem_addr *ena_addr, |
73 | dma_addr_t addr) |
74 | { |
75 | if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { |
		netdev_err(ena_dev->net_device,
			   "DMA address has more bits than the device supports\n");
78 | return -EINVAL; |
79 | } |
80 | |
81 | ena_addr->mem_addr_low = lower_32_bits(addr); |
82 | ena_addr->mem_addr_high = (u16)upper_32_bits(addr); |
83 | |
84 | return 0; |
85 | } |
86 | |
87 | static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) |
88 | { |
89 | struct ena_com_dev *ena_dev = admin_queue->ena_dev; |
90 | struct ena_com_admin_sq *sq = &admin_queue->sq; |
91 | u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); |
92 | |
	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);

	if (!sq->entries) {
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
97 | return -ENOMEM; |
98 | } |
99 | |
100 | sq->head = 0; |
101 | sq->tail = 0; |
102 | sq->phase = 1; |
103 | |
104 | sq->db_addr = NULL; |
105 | |
106 | return 0; |
107 | } |
108 | |
109 | static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue) |
110 | { |
111 | struct ena_com_dev *ena_dev = admin_queue->ena_dev; |
112 | struct ena_com_admin_cq *cq = &admin_queue->cq; |
113 | u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); |
114 | |
	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);

	if (!cq->entries) {
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
119 | return -ENOMEM; |
120 | } |
121 | |
122 | cq->head = 0; |
123 | cq->phase = 1; |
124 | |
125 | return 0; |
126 | } |
127 | |
128 | static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, |
129 | struct ena_aenq_handlers *aenq_handlers) |
130 | { |
131 | struct ena_com_aenq *aenq = &ena_dev->aenq; |
132 | u32 addr_low, addr_high, aenq_caps; |
133 | u16 size; |
134 | |
135 | ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; |
136 | size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); |
	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);

	if (!aenq->entries) {
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
141 | return -ENOMEM; |
142 | } |
143 | |
144 | aenq->head = aenq->q_depth; |
145 | aenq->phase = 1; |
146 | |
147 | addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); |
148 | addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); |
149 | |
	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
152 | |
153 | aenq_caps = 0; |
154 | aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; |
155 | aenq_caps |= |
156 | (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & |
157 | ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; |
	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
159 | |
160 | if (unlikely(!aenq_handlers)) { |
		netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
162 | return -EINVAL; |
163 | } |
164 | |
165 | aenq->aenq_handlers = aenq_handlers; |
166 | |
167 | return 0; |
168 | } |
169 | |
170 | static void comp_ctxt_release(struct ena_com_admin_queue *queue, |
171 | struct ena_comp_ctx *comp_ctx) |
172 | { |
173 | comp_ctx->occupied = false; |
	atomic_dec(&queue->outstanding_cmds);
175 | } |
176 | |
177 | static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue, |
178 | u16 command_id, bool capture) |
179 | { |
180 | if (unlikely(command_id >= admin_queue->q_depth)) { |
		netdev_err(admin_queue->ena_dev->net_device,
			   "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
183 | command_id, admin_queue->q_depth); |
184 | return NULL; |
185 | } |
186 | |
187 | if (unlikely(!admin_queue->comp_ctx)) { |
		netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
189 | return NULL; |
190 | } |
191 | |
192 | if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) { |
		netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
194 | return NULL; |
195 | } |
196 | |
197 | if (capture) { |
		atomic_inc(&admin_queue->outstanding_cmds);
199 | admin_queue->comp_ctx[command_id].occupied = true; |
200 | } |
201 | |
202 | return &admin_queue->comp_ctx[command_id]; |
203 | } |
204 | |
205 | static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, |
206 | struct ena_admin_aq_entry *cmd, |
207 | size_t cmd_size_in_bytes, |
208 | struct ena_admin_acq_entry *comp, |
209 | size_t comp_size_in_bytes) |
210 | { |
211 | struct ena_comp_ctx *comp_ctx; |
212 | u16 tail_masked, cmd_id; |
213 | u16 queue_size_mask; |
214 | u16 cnt; |
215 | |
216 | queue_size_mask = admin_queue->q_depth - 1; |
217 | |
218 | tail_masked = admin_queue->sq.tail & queue_size_mask; |
219 | |
220 | /* In case of queue FULL */ |
	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
226 | } |
227 | |
228 | cmd_id = admin_queue->curr_cmd_id; |
229 | |
230 | cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & |
231 | ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; |
232 | |
233 | cmd->aq_common_descriptor.command_id |= cmd_id & |
234 | ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; |
235 | |
	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);
239 | |
240 | comp_ctx->status = ENA_CMD_SUBMITTED; |
241 | comp_ctx->comp_size = (u32)comp_size_in_bytes; |
242 | comp_ctx->user_cqe = comp; |
243 | comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; |
244 | |
	reinit_completion(&comp_ctx->wait_event);
246 | |
247 | memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); |
248 | |
249 | admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & |
250 | queue_size_mask; |
251 | |
252 | admin_queue->sq.tail++; |
253 | admin_queue->stats.submitted_cmd++; |
254 | |
255 | if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) |
256 | admin_queue->sq.phase = !admin_queue->sq.phase; |
257 | |
	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
259 | |
260 | return comp_ctx; |
261 | } |
262 | |
263 | static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue) |
264 | { |
265 | struct ena_com_dev *ena_dev = admin_queue->ena_dev; |
266 | size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx); |
267 | struct ena_comp_ctx *comp_ctx; |
268 | u16 i; |
269 | |
	admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!admin_queue->comp_ctx)) {
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
273 | return -ENOMEM; |
274 | } |
275 | |
276 | for (i = 0; i < admin_queue->q_depth; i++) { |
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
280 | } |
281 | |
282 | return 0; |
283 | } |
284 | |
285 | static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, |
286 | struct ena_admin_aq_entry *cmd, |
287 | size_t cmd_size_in_bytes, |
288 | struct ena_admin_acq_entry *comp, |
289 | size_t comp_size_in_bytes) |
290 | { |
291 | unsigned long flags = 0; |
292 | struct ena_comp_ctx *comp_ctx; |
293 | |
294 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
295 | if (unlikely(!admin_queue->running_state)) { |
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
298 | } |
299 | comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, |
300 | cmd_size_in_bytes, |
301 | comp, |
302 | comp_size_in_bytes); |
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
306 | |
307 | return comp_ctx; |
308 | } |
309 | |
310 | static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, |
311 | struct ena_com_create_io_ctx *ctx, |
312 | struct ena_com_io_sq *io_sq) |
313 | { |
314 | size_t size; |
315 | int dev_node = 0; |
316 | |
317 | memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); |
318 | |
319 | io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; |
320 | io_sq->desc_entry_size = |
321 | (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? |
322 | sizeof(struct ena_eth_io_tx_desc) : |
323 | sizeof(struct ena_eth_io_rx_desc); |
324 | |
325 | size = io_sq->desc_entry_size * io_sq->q_depth; |
326 | |
327 | if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { |
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
					   GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_alloc_coherent(ena_dev->dmadev, size,
						   &io_sq->desc_addr.phys_addr, GFP_KERNEL);
		}

		if (!io_sq->desc_addr.virt_addr) {
			netdev_err(ena_dev->net_device, "Memory allocation failed\n");
342 | return -ENOMEM; |
343 | } |
344 | } |
345 | |
346 | if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
347 | /* Allocate bounce buffers */ |
348 | io_sq->bounce_buf_ctrl.buffer_size = |
349 | ena_dev->llq_info.desc_list_entry_size; |
350 | io_sq->bounce_buf_ctrl.buffers_num = |
351 | ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; |
352 | io_sq->bounce_buf_ctrl.next_to_use = 0; |
353 | |
354 | size = (size_t)io_sq->bounce_buf_ctrl.buffer_size * |
355 | io_sq->bounce_buf_ctrl.buffers_num; |
356 | |
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
367 | return -ENOMEM; |
368 | } |
369 | |
370 | memcpy(&io_sq->llq_info, &ena_dev->llq_info, |
371 | sizeof(io_sq->llq_info)); |
372 | |
373 | /* Initiate the first bounce buffer */ |
374 | io_sq->llq_buf_ctrl.curr_bounce_buf = |
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
376 | memset(io_sq->llq_buf_ctrl.curr_bounce_buf, |
377 | 0x0, io_sq->llq_info.desc_list_entry_size); |
378 | io_sq->llq_buf_ctrl.descs_left_in_line = |
379 | io_sq->llq_info.descs_num_before_header; |
380 | io_sq->disable_meta_caching = |
381 | io_sq->llq_info.disable_meta_caching; |
382 | |
383 | if (io_sq->llq_info.max_entries_in_tx_burst > 0) |
384 | io_sq->entries_in_tx_burst_left = |
385 | io_sq->llq_info.max_entries_in_tx_burst; |
386 | } |
387 | |
388 | io_sq->tail = 0; |
389 | io_sq->next_to_comp = 0; |
390 | io_sq->phase = 1; |
391 | |
392 | return 0; |
393 | } |
394 | |
395 | static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, |
396 | struct ena_com_create_io_ctx *ctx, |
397 | struct ena_com_io_cq *io_cq) |
398 | { |
399 | size_t size; |
400 | int prev_node = 0; |
401 | |
402 | memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); |
403 | |
404 | /* Use the basic completion descriptor for Rx */ |
405 | io_cq->cdesc_entry_size_in_bytes = |
406 | (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? |
407 | sizeof(struct ena_eth_io_tx_cdesc) : |
408 | sizeof(struct ena_eth_io_rx_cdesc_base); |
409 | |
410 | size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; |
411 | |
	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
420 | GFP_KERNEL); |
421 | } |
422 | |
423 | if (!io_cq->cdesc_addr.virt_addr) { |
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
425 | return -ENOMEM; |
426 | } |
427 | |
428 | io_cq->phase = 1; |
429 | io_cq->head = 0; |
430 | |
431 | return 0; |
432 | } |
433 | |
434 | static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, |
435 | struct ena_admin_acq_entry *cqe) |
436 | { |
437 | struct ena_comp_ctx *comp_ctx; |
438 | u16 cmd_id; |
439 | |
440 | cmd_id = cqe->acq_common_descriptor.command & |
441 | ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; |
442 | |
	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		netdev_err(admin_queue->ena_dev->net_device,
			   "comp_ctx is NULL. Changing the admin queue running state\n");
447 | admin_queue->running_state = false; |
448 | return; |
449 | } |
450 | |
451 | comp_ctx->status = ENA_CMD_COMPLETED; |
452 | comp_ctx->comp_status = cqe->acq_common_descriptor.status; |
453 | |
454 | if (comp_ctx->user_cqe) |
455 | memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); |
456 | |
457 | if (!admin_queue->polling) |
458 | complete(&comp_ctx->wait_event); |
459 | } |
460 | |
461 | static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) |
462 | { |
463 | struct ena_admin_acq_entry *cqe = NULL; |
464 | u16 comp_num = 0; |
465 | u16 head_masked; |
466 | u8 phase; |
467 | |
468 | head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); |
469 | phase = admin_queue->cq.phase; |
470 | |
471 | cqe = &admin_queue->cq.entries[head_masked]; |
472 | |
473 | /* Go over all the completions */ |
474 | while ((READ_ONCE(cqe->acq_common_descriptor.flags) & |
475 | ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { |
476 | /* Do not read the rest of the completion entry before the |
477 | * phase bit was validated |
478 | */ |
479 | dma_rmb(); |
480 | ena_com_handle_single_admin_completion(admin_queue, cqe); |
481 | |
482 | head_masked++; |
483 | comp_num++; |
484 | if (unlikely(head_masked == admin_queue->q_depth)) { |
485 | head_masked = 0; |
486 | phase = !phase; |
487 | } |
488 | |
489 | cqe = &admin_queue->cq.entries[head_masked]; |
490 | } |
491 | |
492 | admin_queue->cq.head += comp_num; |
493 | admin_queue->cq.phase = phase; |
494 | admin_queue->sq.head += comp_num; |
495 | admin_queue->stats.completed_cmd += comp_num; |
496 | } |
497 | |
498 | static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, |
499 | u8 comp_status) |
500 | { |
501 | if (unlikely(comp_status != 0)) |
		netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
503 | comp_status); |
504 | |
505 | switch (comp_status) { |
506 | case ENA_ADMIN_SUCCESS: |
507 | return 0; |
508 | case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: |
509 | return -ENOMEM; |
510 | case ENA_ADMIN_UNSUPPORTED_OPCODE: |
511 | return -EOPNOTSUPP; |
512 | case ENA_ADMIN_BAD_OPCODE: |
513 | case ENA_ADMIN_MALFORMED_REQUEST: |
514 | case ENA_ADMIN_ILLEGAL_PARAMETER: |
515 | case ENA_ADMIN_UNKNOWN_ERROR: |
516 | return -EINVAL; |
517 | case ENA_ADMIN_RESOURCE_BUSY: |
518 | return -EAGAIN; |
519 | } |
520 | |
521 | return -EINVAL; |
522 | } |
523 | |
524 | static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) |
525 | { |
526 | exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP); |
527 | delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us); |
528 | delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); |
	usleep_range(delay_us, 2 * delay_us);
530 | } |
531 | |
532 | static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, |
533 | struct ena_com_admin_queue *admin_queue) |
534 | { |
535 | unsigned long flags = 0; |
536 | unsigned long timeout; |
537 | int ret; |
538 | u32 exp = 0; |
539 | |
	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
541 | |
542 | while (1) { |
543 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
544 | ena_com_handle_admin_completion(admin_queue); |
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
546 | |
547 | if (comp_ctx->status != ENA_CMD_SUBMITTED) |
548 | break; |
549 | |
550 | if (time_is_before_jiffies(timeout)) { |
			netdev_err(admin_queue->ena_dev->net_device,
				   "Wait for completion (polling) timeout\n");
553 | /* ENA didn't have any completion */ |
554 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
555 | admin_queue->stats.no_completion++; |
556 | admin_queue->running_state = false; |
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);
558 | |
559 | ret = -ETIME; |
560 | goto err; |
561 | } |
562 | |
		ena_delay_exponential_backoff_us(exp++,
						 admin_queue->ena_dev->ena_min_poll_delay_us);
565 | } |
566 | |
567 | if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { |
		netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
572 | ret = -ENODEV; |
573 | goto err; |
574 | } |
575 | |
	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
577 | |
	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
581 | return ret; |
582 | } |
583 | |
584 | /* |
585 | * Set the LLQ configurations of the firmware |
586 | * |
 * The driver provides only the enabled feature values to the device,
 * which, in turn, checks if they are supported.
589 | */ |
590 | static int ena_com_set_llq(struct ena_com_dev *ena_dev) |
591 | { |
592 | struct ena_com_admin_queue *admin_queue; |
593 | struct ena_admin_set_feat_cmd cmd; |
594 | struct ena_admin_set_feat_resp resp; |
595 | struct ena_com_llq_info *llq_info = &ena_dev->llq_info; |
596 | int ret; |
597 | |
598 | memset(&cmd, 0x0, sizeof(cmd)); |
599 | admin_queue = &ena_dev->admin_queue; |
600 | |
601 | cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; |
602 | cmd.feat_common.feature_id = ENA_ADMIN_LLQ; |
603 | |
604 | cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; |
605 | cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; |
606 | cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; |
607 | cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; |
608 | |
609 | cmd.u.llq.accel_mode.u.set.enabled_flags = |
610 | BIT(ENA_ADMIN_DISABLE_META_CACHING) | |
611 | BIT(ENA_ADMIN_LIMIT_TX_BURST); |
612 | |
	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
621 | |
622 | return ret; |
623 | } |
624 | |
625 | static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, |
626 | struct ena_admin_feature_llq_desc *llq_features, |
627 | struct ena_llq_configurations *llq_default_cfg) |
628 | { |
629 | struct ena_com_llq_info *llq_info = &ena_dev->llq_info; |
630 | struct ena_admin_accel_mode_get llq_accel_mode_get; |
631 | u16 supported_feat; |
632 | int rc; |
633 | |
634 | memset(llq_info, 0, sizeof(*llq_info)); |
635 | |
636 | supported_feat = llq_features->header_location_ctrl_supported; |
637 | |
638 | if (likely(supported_feat & llq_default_cfg->llq_header_location)) { |
639 | llq_info->header_location_ctrl = |
640 | llq_default_cfg->llq_header_location; |
641 | } else { |
		netdev_err(ena_dev->net_device,
			   "Invalid header location control, supported: 0x%x\n", supported_feat);
644 | return -EINVAL; |
645 | } |
646 | |
647 | if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { |
648 | supported_feat = llq_features->descriptors_stride_ctrl_supported; |
649 | if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { |
650 | llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; |
651 | } else { |
652 | if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { |
653 | llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; |
654 | } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { |
655 | llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; |
656 | } else { |
				netdev_err(ena_dev->net_device,
					   "Invalid desc_stride_ctrl, supported: 0x%x\n",
659 | supported_feat); |
660 | return -EINVAL; |
661 | } |
662 | |
			netdev_err(ena_dev->net_device,
				   "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
665 | llq_default_cfg->llq_stride_ctrl, supported_feat, |
666 | llq_info->desc_stride_ctrl); |
667 | } |
668 | } else { |
669 | llq_info->desc_stride_ctrl = 0; |
670 | } |
671 | |
672 | supported_feat = llq_features->entry_size_ctrl_supported; |
673 | if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { |
674 | llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; |
675 | llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; |
676 | } else { |
677 | if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { |
678 | llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; |
679 | llq_info->desc_list_entry_size = 128; |
680 | } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { |
681 | llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; |
682 | llq_info->desc_list_entry_size = 192; |
683 | } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) { |
684 | llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; |
685 | llq_info->desc_list_entry_size = 256; |
686 | } else { |
			netdev_err(ena_dev->net_device,
				   "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
689 | return -EINVAL; |
690 | } |
691 | |
		netdev_err(ena_dev->net_device,
			   "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
694 | llq_default_cfg->llq_ring_entry_size, supported_feat, |
695 | llq_info->desc_list_entry_size); |
696 | } |
	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from __iowrite64_copy(), which
		 * copies the entry in 8-byte words.
		 */
		netdev_err(ena_dev->net_device, "Illegal entry size %d\n",
702 | llq_info->desc_list_entry_size); |
703 | return -EINVAL; |
704 | } |
705 | |
706 | if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) |
707 | llq_info->descs_per_entry = llq_info->desc_list_entry_size / |
708 | sizeof(struct ena_eth_io_tx_desc); |
709 | else |
710 | llq_info->descs_per_entry = 1; |
711 | |
712 | supported_feat = llq_features->desc_num_before_header_supported; |
713 | if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { |
714 | llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; |
715 | } else { |
716 | if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) { |
717 | llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; |
718 | } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) { |
719 | llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; |
720 | } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) { |
721 | llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; |
722 | } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { |
723 | llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; |
724 | } else { |
			netdev_err(ena_dev->net_device,
				   "Invalid descs_num_before_header, supported: 0x%x\n",
727 | supported_feat); |
728 | return -EINVAL; |
729 | } |
730 | |
		netdev_err(ena_dev->net_device,
			   "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
733 | llq_default_cfg->llq_num_decs_before_header, supported_feat, |
734 | llq_info->descs_num_before_header); |
735 | } |
	/* Check whether the accelerated queue mode is supported */
737 | llq_accel_mode_get = llq_features->accel_mode.u.get; |
738 | |
739 | llq_info->disable_meta_caching = |
740 | !!(llq_accel_mode_get.supported_flags & |
741 | BIT(ENA_ADMIN_DISABLE_META_CACHING)); |
742 | |
743 | if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST)) |
744 | llq_info->max_entries_in_tx_burst = |
745 | llq_accel_mode_get.max_tx_burst_size / |
746 | llq_default_cfg->llq_ring_entry_size_value; |
747 | |
748 | rc = ena_com_set_llq(ena_dev); |
749 | if (rc) |
		netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
751 | |
752 | return rc; |
753 | } |
754 | |
755 | static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, |
756 | struct ena_com_admin_queue *admin_queue) |
757 | { |
758 | unsigned long flags = 0; |
759 | int ret; |
760 | |
	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(admin_queue->completion_timeout));
763 | |
	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive the MSI-X
	 *    interrupt.
	 */
769 | if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { |
770 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
771 | ena_com_handle_admin_completion(admin_queue); |
772 | admin_queue->stats.no_completion++; |
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
774 | |
775 | if (comp_ctx->status == ENA_CMD_COMPLETED) { |
			netdev_err(admin_queue->ena_dev->net_device,
				   "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
				   comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
779 | /* Check if fallback to polling is enabled */ |
780 | if (admin_queue->auto_polling) |
781 | admin_queue->polling = true; |
782 | } else { |
			netdev_err(admin_queue->ena_dev->net_device,
				   "The ena device didn't send a completion for the admin cmd %d status %d\n",
785 | comp_ctx->cmd_opcode, comp_ctx->status); |
786 | } |
		/* Check if we have shifted to polling mode. This happens when
		 * there is a completion without an interrupt and autopolling
		 * mode is enabled. In that case, continue normal execution.
		 */
791 | if (!admin_queue->polling) { |
792 | admin_queue->running_state = false; |
793 | ret = -ETIME; |
794 | goto err; |
795 | } |
796 | } |
797 | |
	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
801 | return ret; |
802 | } |
803 | |
/* This method reads a hardware device register by posting a write and
 * waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 */
808 | static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) |
809 | { |
810 | struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; |
811 | volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = |
812 | mmio_read->read_resp; |
813 | u32 mmio_read_reg, ret, i; |
814 | unsigned long flags = 0; |
815 | u32 timeout = mmio_read->reg_read_to; |
816 | |
817 | might_sleep(); |
818 | |
819 | if (timeout == 0) |
820 | timeout = ENA_REG_READ_TIMEOUT; |
821 | |
822 | /* If readless is disabled, perform regular read */ |
823 | if (!mmio_read->readless_supported) |
		return readl(ena_dev->reg_bar + offset);
825 | |
826 | spin_lock_irqsave(&mmio_read->lock, flags); |
827 | mmio_read->seq_num++; |
828 | |
829 | read_resp->req_id = mmio_read->seq_num + 0xDEAD; |
830 | mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) & |
831 | ENA_REGS_MMIO_REG_READ_REG_OFF_MASK; |
832 | mmio_read_reg |= mmio_read->seq_num & |
833 | ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; |
834 | |
	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
836 | |
837 | for (i = 0; i < timeout; i++) { |
838 | if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) |
839 | break; |
840 | |
841 | udelay(1); |
842 | } |
843 | |
844 | if (unlikely(i == timeout)) { |
		netdev_err(ena_dev->net_device,
			   "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
847 | mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off); |
848 | ret = ENA_MMIO_READ_TIMEOUT; |
849 | goto err; |
850 | } |
851 | |
852 | if (read_resp->reg_off != offset) { |
		netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
854 | ret = ENA_MMIO_READ_TIMEOUT; |
855 | } else { |
856 | ret = read_resp->reg_val; |
857 | } |
858 | err: |
	spin_unlock_irqrestore(&mmio_read->lock, flags);
860 | |
861 | return ret; |
862 | } |
863 | |
/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
871 | static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, |
872 | struct ena_com_admin_queue *admin_queue) |
873 | { |
874 | if (admin_queue->polling) |
875 | return ena_com_wait_and_process_admin_cq_polling(comp_ctx, |
876 | admin_queue); |
877 | |
878 | return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx, |
879 | admin_queue); |
880 | } |
881 | |
882 | static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, |
883 | struct ena_com_io_sq *io_sq) |
884 | { |
885 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
886 | struct ena_admin_aq_destroy_sq_cmd destroy_cmd; |
887 | struct ena_admin_acq_destroy_sq_resp_desc destroy_resp; |
888 | u8 direction; |
889 | int ret; |
890 | |
891 | memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); |
892 | |
893 | if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) |
894 | direction = ENA_ADMIN_SQ_DIRECTION_TX; |
895 | else |
896 | direction = ENA_ADMIN_SQ_DIRECTION_RX; |
897 | |
898 | destroy_cmd.sq.sq_identity |= (direction << |
899 | ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & |
900 | ENA_ADMIN_SQ_SQ_DIRECTION_MASK; |
901 | |
902 | destroy_cmd.sq.sq_idx = io_sq->idx; |
903 | destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ; |
904 | |
	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));
910 | |
911 | if (unlikely(ret && (ret != -ENODEV))) |
		netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
913 | |
914 | return ret; |
915 | } |
916 | |
917 | static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, |
918 | struct ena_com_io_sq *io_sq, |
919 | struct ena_com_io_cq *io_cq) |
920 | { |
921 | size_t size; |
922 | |
923 | if (io_cq->cdesc_addr.virt_addr) { |
924 | size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; |
925 | |
		dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);
928 | |
929 | io_cq->cdesc_addr.virt_addr = NULL; |
930 | } |
931 | |
932 | if (io_sq->desc_addr.virt_addr) { |
933 | size = io_sq->desc_entry_size * io_sq->q_depth; |
934 | |
		dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
				  io_sq->desc_addr.phys_addr);
937 | |
938 | io_sq->desc_addr.virt_addr = NULL; |
939 | } |
940 | |
941 | if (io_sq->bounce_buf_ctrl.base_buffer) { |
		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
943 | io_sq->bounce_buf_ctrl.base_buffer = NULL; |
944 | } |
945 | } |
946 | |
947 | static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, |
948 | u16 exp_state) |
949 | { |
950 | u32 val, exp = 0; |
951 | unsigned long timeout_stamp; |
952 | |
953 | /* Convert timeout from resolution of 100ms to us resolution. */ |
	timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);
955 | |
956 | while (1) { |
957 | val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); |
958 | |
959 | if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { |
			netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
961 | return -ETIME; |
962 | } |
963 | |
964 | if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == |
965 | exp_state) |
966 | return 0; |
967 | |
968 | if (time_is_before_jiffies(timeout_stamp)) |
969 | return -ETIME; |
970 | |
		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
972 | } |
973 | } |
974 | |
975 | static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, |
976 | enum ena_admin_aq_feature_id feature_id) |
977 | { |
978 | u32 feature_mask = 1 << feature_id; |
979 | |
	/* Device attributes are always supported */
981 | if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && |
982 | !(ena_dev->supported_features & feature_mask)) |
983 | return false; |
984 | |
985 | return true; |
986 | } |
987 | |
988 | static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, |
989 | struct ena_admin_get_feat_resp *get_resp, |
990 | enum ena_admin_aq_feature_id feature_id, |
991 | dma_addr_t control_buf_dma_addr, |
992 | u32 control_buff_size, |
993 | u8 feature_ver) |
994 | { |
995 | struct ena_com_admin_queue *admin_queue; |
996 | struct ena_admin_get_feat_cmd get_cmd; |
997 | int ret; |
998 | |
999 | if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { |
		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
1001 | return -EOPNOTSUPP; |
1002 | } |
1003 | |
1004 | memset(&get_cmd, 0x0, sizeof(get_cmd)); |
1005 | admin_queue = &ena_dev->admin_queue; |
1006 | |
1007 | get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; |
1008 | |
1009 | if (control_buff_size) |
1010 | get_cmd.aq_common_descriptor.flags = |
1011 | ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; |
1012 | else |
1013 | get_cmd.aq_common_descriptor.flags = 0; |
1014 | |
	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
1020 | return ret; |
1021 | } |
1022 | |
1023 | get_cmd.control_buffer.length = control_buff_size; |
1024 | get_cmd.feat_common.feature_version = feature_ver; |
1025 | get_cmd.feat_common.feature_id = feature_id; |
1026 | |
	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
1038 | |
1039 | return ret; |
1040 | } |
1041 | |
1042 | static int ena_com_get_feature(struct ena_com_dev *ena_dev, |
1043 | struct ena_admin_get_feat_resp *get_resp, |
1044 | enum ena_admin_aq_feature_id feature_id, |
1045 | u8 feature_ver) |
1046 | { |
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0,
				      feature_ver);
1053 | } |
1054 | |
1055 | int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev) |
1056 | { |
1057 | return ena_dev->rss.hash_func; |
1058 | } |
1059 | |
1060 | static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) |
1061 | { |
1062 | struct ena_admin_feature_rss_flow_hash_control *hash_key = |
1063 | (ena_dev->rss).hash_key; |
1064 | |
	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
1066 | /* The key buffer is stored in the device in an array of |
1067 | * uint32 elements. |
1068 | */ |
1069 | hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS; |
1070 | } |
1071 | |
1072 | static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) |
1073 | { |
	struct ena_rss *rss = &ena_dev->rss;
1075 | |
	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
1077 | return -EOPNOTSUPP; |
1078 | |
	rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
					   &rss->hash_key_dma_addr, GFP_KERNEL);
1081 | |
1082 | if (unlikely(!rss->hash_key)) |
1083 | return -ENOMEM; |
1084 | |
1085 | return 0; |
1086 | } |
1087 | |
1088 | static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) |
1089 | { |
	struct ena_rss *rss = &ena_dev->rss;
1091 | |
1092 | if (rss->hash_key) |
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
				  rss->hash_key_dma_addr);
1095 | rss->hash_key = NULL; |
1096 | } |
1097 | |
1098 | static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) |
1099 | { |
	struct ena_rss *rss = &ena_dev->rss;
1101 | |
	rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
					    &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1104 | |
1105 | if (unlikely(!rss->hash_ctrl)) |
1106 | return -ENOMEM; |
1107 | |
1108 | return 0; |
1109 | } |
1110 | |
1111 | static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) |
1112 | { |
	struct ena_rss *rss = &ena_dev->rss;
1114 | |
1115 | if (rss->hash_ctrl) |
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
				  rss->hash_ctrl_dma_addr);
1118 | rss->hash_ctrl = NULL; |
1119 | } |
1120 | |
1121 | static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, |
1122 | u16 log_size) |
1123 | { |
	struct ena_rss *rss = &ena_dev->rss;
1125 | struct ena_admin_get_feat_resp get_resp; |
1126 | size_t tbl_size; |
1127 | int ret; |
1128 | |
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
1131 | if (unlikely(ret)) |
1132 | return ret; |
1133 | |
1134 | if ((get_resp.u.ind_table.min_size > log_size) || |
1135 | (get_resp.u.ind_table.max_size < log_size)) { |
		netdev_err(ena_dev->net_device,
			   "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1138 | 1 << log_size, 1 << get_resp.u.ind_table.min_size, |
1139 | 1 << get_resp.u.ind_table.max_size); |
1140 | return -EINVAL; |
1141 | } |
1142 | |
1143 | tbl_size = (1ULL << log_size) * |
1144 | sizeof(struct ena_admin_rss_ind_table_entry); |
1145 | |
	rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
					      GFP_KERNEL);
1148 | if (unlikely(!rss->rss_ind_tbl)) |
1149 | goto mem_err1; |
1150 | |
1151 | tbl_size = (1ULL << log_size) * sizeof(u16); |
	rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1153 | if (unlikely(!rss->host_rss_ind_tbl)) |
1154 | goto mem_err2; |
1155 | |
1156 | rss->tbl_log_size = log_size; |
1157 | |
1158 | return 0; |
1159 | |
1160 | mem_err2: |
1161 | tbl_size = (1ULL << log_size) * |
1162 | sizeof(struct ena_admin_rss_ind_table_entry); |
1163 | |
	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
1165 | rss->rss_ind_tbl = NULL; |
1166 | mem_err1: |
1167 | rss->tbl_log_size = 0; |
1168 | return -ENOMEM; |
1169 | } |
1170 | |
1171 | static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) |
1172 | { |
	struct ena_rss *rss = &ena_dev->rss;
1174 | size_t tbl_size = (1ULL << rss->tbl_log_size) * |
1175 | sizeof(struct ena_admin_rss_ind_table_entry); |
1176 | |
1177 | if (rss->rss_ind_tbl) |
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
1180 | rss->rss_ind_tbl = NULL; |
1181 | |
1182 | if (rss->host_rss_ind_tbl) |
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1184 | rss->host_rss_ind_tbl = NULL; |
1185 | } |
1186 | |
1187 | static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, |
1188 | struct ena_com_io_sq *io_sq, u16 cq_idx) |
1189 | { |
1190 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
1191 | struct ena_admin_aq_create_sq_cmd create_cmd; |
1192 | struct ena_admin_acq_create_sq_resp_desc cmd_completion; |
1193 | u8 direction; |
1194 | int ret; |
1195 | |
1196 | memset(&create_cmd, 0x0, sizeof(create_cmd)); |
1197 | |
1198 | create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; |
1199 | |
1200 | if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) |
1201 | direction = ENA_ADMIN_SQ_DIRECTION_TX; |
1202 | else |
1203 | direction = ENA_ADMIN_SQ_DIRECTION_RX; |
1204 | |
1205 | create_cmd.sq_identity |= (direction << |
1206 | ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & |
1207 | ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; |
1208 | |
1209 | create_cmd.sq_caps_2 |= io_sq->mem_queue_type & |
1210 | ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; |
1211 | |
1212 | create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << |
1213 | ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & |
1214 | ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; |
1215 | |
1216 | create_cmd.sq_caps_3 |= |
1217 | ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; |
1218 | |
1219 | create_cmd.cq_idx = cq_idx; |
1220 | create_cmd.sq_depth = io_sq->q_depth; |
1221 | |
1222 | if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { |
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			netdev_err(ena_dev->net_device, "Memory address set failed\n");
1228 | return ret; |
1229 | } |
1230 | } |
1231 | |
	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
1239 | return ret; |
1240 | } |
1241 | |
1242 | io_sq->idx = cmd_completion.sq_idx; |
1243 | |
1244 | io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + |
1245 | (uintptr_t)cmd_completion.sq_doorbell_offset); |
1246 | |
1247 | if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
1248 | io_sq->desc_addr.pbuf_dev_addr = |
1249 | (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + |
1250 | cmd_completion.llq_descriptors_offset); |
1251 | } |
1252 | |
	netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1254 | |
1255 | return ret; |
1256 | } |
1257 | |
1258 | static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) |
1259 | { |
	struct ena_rss *rss = &ena_dev->rss;
1261 | struct ena_com_io_sq *io_sq; |
1262 | u16 qid; |
1263 | int i; |
1264 | |
1265 | for (i = 0; i < 1 << rss->tbl_log_size; i++) { |
1266 | qid = rss->host_rss_ind_tbl[i]; |
1267 | if (qid >= ENA_TOTAL_NUM_QUEUES) |
1268 | return -EINVAL; |
1269 | |
1270 | io_sq = &ena_dev->io_sq_queues[qid]; |
1271 | |
1272 | if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) |
1273 | return -EINVAL; |
1274 | |
1275 | rss->rss_ind_tbl[i].cq_idx = io_sq->idx; |
1276 | } |
1277 | |
1278 | return 0; |
1279 | } |
1280 | |
1281 | static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, |
1282 | u16 intr_delay_resolution) |
1283 | { |
1284 | u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; |
1285 | |
1286 | if (unlikely(!intr_delay_resolution)) { |
		netdev_err(ena_dev->net_device,
			   "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1289 | intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; |
1290 | } |
1291 | |
1292 | /* update Rx */ |
1293 | ena_dev->intr_moder_rx_interval = |
1294 | ena_dev->intr_moder_rx_interval * |
1295 | prev_intr_delay_resolution / |
1296 | intr_delay_resolution; |
1297 | |
1298 | /* update Tx */ |
1299 | ena_dev->intr_moder_tx_interval = |
1300 | ena_dev->intr_moder_tx_interval * |
1301 | prev_intr_delay_resolution / |
1302 | intr_delay_resolution; |
1303 | |
1304 | ena_dev->intr_delay_resolution = intr_delay_resolution; |
1305 | } |
1306 | |
1307 | /*****************************************************************************/ |
1308 | /******************************* API ******************************/ |
1309 | /*****************************************************************************/ |
1310 | |
1311 | int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, |
1312 | struct ena_admin_aq_entry *cmd, |
1313 | size_t cmd_size, |
1314 | struct ena_admin_acq_entry *comp, |
1315 | size_t comp_size) |
1316 | { |
1317 | struct ena_comp_ctx *comp_ctx; |
1318 | int ret; |
1319 | |
	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		ret = PTR_ERR(comp_ctx);
		if (ret == -ENODEV)
			netdev_dbg(admin_queue->ena_dev->net_device,
				   "Failed to submit command [%d]\n", ret);
		else
			netdev_err(admin_queue->ena_dev->net_device,
				   "Failed to submit command [%d]\n", ret);
1330 | |
1331 | return ret; |
1332 | } |
1333 | |
1334 | ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); |
1335 | if (unlikely(ret)) { |
		if (admin_queue->running_state)
			netdev_err(admin_queue->ena_dev->net_device,
				   "Failed to process command. ret = %d\n", ret);
		else
			netdev_dbg(admin_queue->ena_dev->net_device,
				   "Failed to process command. ret = %d\n", ret);
1342 | } |
1343 | return ret; |
1344 | } |
1345 | |
1346 | int ena_com_create_io_cq(struct ena_com_dev *ena_dev, |
1347 | struct ena_com_io_cq *io_cq) |
1348 | { |
1349 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
1350 | struct ena_admin_aq_create_cq_cmd create_cmd; |
1351 | struct ena_admin_acq_create_cq_resp_desc cmd_completion; |
1352 | int ret; |
1353 | |
1354 | memset(&create_cmd, 0x0, sizeof(create_cmd)); |
1355 | |
1356 | create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; |
1357 | |
1358 | create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & |
1359 | ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; |
1360 | create_cmd.cq_caps_1 |= |
1361 | ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; |
1362 | |
1363 | create_cmd.msix_vector = io_cq->msix_vector; |
1364 | create_cmd.cq_depth = io_cq->q_depth; |
1365 | |
	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
1371 | return ret; |
1372 | } |
1373 | |
	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
1381 | return ret; |
1382 | } |
1383 | |
1384 | io_cq->idx = cmd_completion.cq_idx; |
1385 | |
1386 | io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + |
1387 | cmd_completion.cq_interrupt_unmask_register_offset); |
1388 | |
1389 | if (cmd_completion.numa_node_register_offset) |
1390 | io_cq->numa_node_cfg_reg = |
1391 | (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + |
1392 | cmd_completion.numa_node_register_offset); |
1393 | |
	netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1395 | |
1396 | return ret; |
1397 | } |
1398 | |
1399 | int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, |
1400 | struct ena_com_io_sq **io_sq, |
1401 | struct ena_com_io_cq **io_cq) |
1402 | { |
1403 | if (qid >= ENA_TOTAL_NUM_QUEUES) { |
		netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
1405 | ENA_TOTAL_NUM_QUEUES); |
1406 | return -EINVAL; |
1407 | } |
1408 | |
1409 | *io_sq = &ena_dev->io_sq_queues[qid]; |
1410 | *io_cq = &ena_dev->io_cq_queues[qid]; |
1411 | |
1412 | return 0; |
1413 | } |
1414 | |
1415 | void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) |
1416 | { |
1417 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
1418 | struct ena_comp_ctx *comp_ctx; |
1419 | u16 i; |
1420 | |
1421 | if (!admin_queue->comp_ctx) |
1422 | return; |
1423 | |
1424 | for (i = 0; i < admin_queue->q_depth; i++) { |
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
1426 | if (unlikely(!comp_ctx)) |
1427 | break; |
1428 | |
1429 | comp_ctx->status = ENA_CMD_ABORTED; |
1430 | |
1431 | complete(&comp_ctx->wait_event); |
1432 | } |
1433 | } |
1434 | |
1435 | void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) |
1436 | { |
1437 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
1438 | unsigned long flags = 0; |
1439 | u32 exp = 0; |
1440 | |
1441 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1445 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
1446 | } |
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1448 | } |
1449 | |
1450 | int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, |
1451 | struct ena_com_io_cq *io_cq) |
1452 | { |
1453 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
1454 | struct ena_admin_aq_destroy_cq_cmd destroy_cmd; |
1455 | struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; |
1456 | int ret; |
1457 | |
1458 | memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); |
1459 | |
1460 | destroy_cmd.cq_idx = io_cq->idx; |
1461 | destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; |
1462 | |
	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
1471 | |
1472 | return ret; |
1473 | } |
1474 | |
1475 | bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) |
1476 | { |
1477 | return ena_dev->admin_queue.running_state; |
1478 | } |
1479 | |
1480 | void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) |
1481 | { |
1482 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
1483 | unsigned long flags = 0; |
1484 | |
1485 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
1486 | ena_dev->admin_queue.running_state = state; |
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1488 | } |
1489 | |
1490 | void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) |
1491 | { |
1492 | u16 depth = ena_dev->aenq.q_depth; |
1493 | |
	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1495 | |
1496 | /* Init head_db to mark that all entries in the queue |
1497 | * are initially available |
1498 | */ |
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1500 | } |
1501 | |
1502 | int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) |
1503 | { |
1504 | struct ena_com_admin_queue *admin_queue; |
1505 | struct ena_admin_set_feat_cmd cmd; |
1506 | struct ena_admin_set_feat_resp resp; |
1507 | struct ena_admin_get_feat_resp get_resp; |
1508 | int ret; |
1509 | |
	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		dev_info(ena_dev->dmadev, "Can't get aenq configuration\n");
1513 | return ret; |
1514 | } |
1515 | |
1516 | if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { |
netdev_warn(ena_dev->net_device,
"Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1519 | get_resp.u.aenq.supported_groups, groups_flag); |
1520 | return -EOPNOTSUPP; |
1521 | } |
1522 | |
1523 | memset(&cmd, 0x0, sizeof(cmd)); |
1524 | admin_queue = &ena_dev->admin_queue; |
1525 | |
1526 | cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; |
1527 | cmd.aq_common_descriptor.flags = 0; |
1528 | cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; |
1529 | cmd.u.aenq.enabled_groups = groups_flag; |
1530 | |
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
1536 | |
1537 | if (unlikely(ret)) |
netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
1539 | |
1540 | return ret; |
1541 | } |
1542 | |
1543 | int ena_com_get_dma_width(struct ena_com_dev *ena_dev) |
1544 | { |
1545 | u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); |
1546 | u32 width; |
1547 | |
1548 | if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { |
netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1550 | return -ETIME; |
1551 | } |
1552 | |
1553 | width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> |
1554 | ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; |
1555 | |
netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
1557 | |
if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
1560 | return -EINVAL; |
1561 | } |
1562 | |
1563 | ena_dev->dma_addr_bits = width; |
1564 | |
1565 | return width; |
1566 | } |
1567 | |
1568 | int ena_com_validate_version(struct ena_com_dev *ena_dev) |
1569 | { |
1570 | u32 ver; |
1571 | u32 ctrl_ver; |
1572 | u32 ctrl_ver_masked; |
1573 | |
/* Make sure the ENA version and the controller version are at least
* the versions the driver expects
*/
1577 | ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); |
1578 | ctrl_ver = ena_com_reg_bar_read32(ena_dev, |
1579 | ENA_REGS_CONTROLLER_VERSION_OFF); |
1580 | |
1581 | if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { |
netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1583 | return -ETIME; |
1584 | } |
1585 | |
dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
1587 | (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, |
1588 | ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); |
1589 | |
dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
1591 | (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> |
1592 | ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, |
1593 | (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> |
1594 | ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, |
1595 | (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), |
1596 | (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> |
1597 | ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); |
1598 | |
1599 | ctrl_ver_masked = |
1600 | (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | |
1601 | (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | |
1602 | (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); |
1603 | |
1604 | /* Validate the ctrl version without the implementation ID */ |
1605 | if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { |
netdev_err(ena_dev->net_device,
"ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1608 | return -1; |
1609 | } |
1610 | |
1611 | return 0; |
1612 | } |
1613 | |
1614 | static void |
1615 | ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev, |
1616 | struct ena_com_admin_queue *admin_queue) |
1617 | |
1618 | { |
1619 | if (!admin_queue->comp_ctx) |
1620 | return; |
1621 | |
devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1623 | |
1624 | admin_queue->comp_ctx = NULL; |
1625 | } |
1626 | |
1627 | void ena_com_admin_destroy(struct ena_com_dev *ena_dev) |
1628 | { |
1629 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
1630 | struct ena_com_admin_cq *cq = &admin_queue->cq; |
1631 | struct ena_com_admin_sq *sq = &admin_queue->sq; |
1632 | struct ena_com_aenq *aenq = &ena_dev->aenq; |
1633 | u16 size; |
1634 | |
1635 | ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue); |
1636 | |
1637 | size = ADMIN_SQ_SIZE(admin_queue->q_depth); |
1638 | if (sq->entries) |
dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
1640 | sq->entries = NULL; |
1641 | |
1642 | size = ADMIN_CQ_SIZE(admin_queue->q_depth); |
1643 | if (cq->entries) |
dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
1645 | cq->entries = NULL; |
1646 | |
1647 | size = ADMIN_AENQ_SIZE(aenq->q_depth); |
1648 | if (ena_dev->aenq.entries) |
dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
1650 | aenq->entries = NULL; |
1651 | } |
1652 | |
1653 | void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) |
1654 | { |
1655 | u32 mask_value = 0; |
1656 | |
1657 | if (polling) |
1658 | mask_value = ENA_REGS_ADMIN_INTR_MASK; |
1659 | |
writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1661 | ena_dev->admin_queue.polling = polling; |
1662 | } |
1663 | |
1664 | void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, |
1665 | bool polling) |
1666 | { |
1667 | ena_dev->admin_queue.auto_polling = polling; |
1668 | } |
1669 | |
1670 | int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) |
1671 | { |
1672 | struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; |
1673 | |
1674 | spin_lock_init(&mmio_read->lock); |
mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
&mmio_read->read_resp_dma_addr, GFP_KERNEL);
1677 | if (unlikely(!mmio_read->read_resp)) |
1678 | goto err; |
1679 | |
1680 | ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); |
1681 | |
1682 | mmio_read->read_resp->req_id = 0x0; |
1683 | mmio_read->seq_num = 0x0; |
1684 | mmio_read->readless_supported = true; |
1685 | |
1686 | return 0; |
1687 | |
err:
return -ENOMEM;
1691 | } |
1692 | |
1693 | void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) |
1694 | { |
1695 | struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; |
1696 | |
1697 | mmio_read->readless_supported = readless_supported; |
1698 | } |
1699 | |
1700 | void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) |
1701 | { |
1702 | struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; |
1703 | |
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1706 | |
dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
mmio_read->read_resp_dma_addr);
1709 | |
1710 | mmio_read->read_resp = NULL; |
1711 | } |
1712 | |
1713 | void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) |
1714 | { |
1715 | struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; |
1716 | u32 addr_low, addr_high; |
1717 | |
1718 | addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); |
1719 | addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); |
1720 | |
writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1723 | } |
1724 | |
1725 | int ena_com_admin_init(struct ena_com_dev *ena_dev, |
1726 | struct ena_aenq_handlers *aenq_handlers) |
1727 | { |
1728 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
1729 | u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; |
1730 | int ret; |
1731 | |
1732 | dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); |
1733 | |
1734 | if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { |
netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1736 | return -ETIME; |
1737 | } |
1738 | |
1739 | if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { |
netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
1741 | return -ENODEV; |
1742 | } |
1743 | |
1744 | admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; |
1745 | |
1746 | admin_queue->q_dmadev = ena_dev->dmadev; |
1747 | admin_queue->polling = false; |
1748 | admin_queue->curr_cmd_id = 0; |
1749 | |
atomic_set(&admin_queue->outstanding_cmds, 0);
1751 | |
1752 | spin_lock_init(&admin_queue->q_lock); |
1753 | |
1754 | ret = ena_com_init_comp_ctxt(admin_queue); |
1755 | if (ret) |
1756 | goto error; |
1757 | |
1758 | ret = ena_com_admin_init_sq(admin_queue); |
1759 | if (ret) |
1760 | goto error; |
1761 | |
1762 | ret = ena_com_admin_init_cq(admin_queue); |
1763 | if (ret) |
1764 | goto error; |
1765 | |
1766 | admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + |
1767 | ENA_REGS_AQ_DB_OFF); |
1768 | |
1769 | addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); |
1770 | addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); |
1771 | |
writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1774 | |
1775 | addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); |
1776 | addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); |
1777 | |
writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1780 | |
1781 | aq_caps = 0; |
1782 | aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; |
1783 | aq_caps |= (sizeof(struct ena_admin_aq_entry) << |
1784 | ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & |
1785 | ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; |
1786 | |
1787 | acq_caps = 0; |
1788 | acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; |
1789 | acq_caps |= (sizeof(struct ena_admin_acq_entry) << |
1790 | ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & |
1791 | ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; |
1792 | |
writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1795 | ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); |
1796 | if (ret) |
1797 | goto error; |
1798 | |
1799 | admin_queue->ena_dev = ena_dev; |
1800 | admin_queue->running_state = true; |
1801 | |
1802 | return 0; |
1803 | error: |
1804 | ena_com_admin_destroy(ena_dev); |
1805 | |
1806 | return ret; |
1807 | } |
1808 | |
1809 | int ena_com_create_io_queue(struct ena_com_dev *ena_dev, |
1810 | struct ena_com_create_io_ctx *ctx) |
1811 | { |
1812 | struct ena_com_io_sq *io_sq; |
1813 | struct ena_com_io_cq *io_cq; |
1814 | int ret; |
1815 | |
1816 | if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { |
netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
1818 | ctx->qid, ENA_TOTAL_NUM_QUEUES); |
1819 | return -EINVAL; |
1820 | } |
1821 | |
1822 | io_sq = &ena_dev->io_sq_queues[ctx->qid]; |
1823 | io_cq = &ena_dev->io_cq_queues[ctx->qid]; |
1824 | |
1825 | memset(io_sq, 0x0, sizeof(*io_sq)); |
1826 | memset(io_cq, 0x0, sizeof(*io_cq)); |
1827 | |
1828 | /* Init CQ */ |
1829 | io_cq->q_depth = ctx->queue_size; |
1830 | io_cq->direction = ctx->direction; |
1831 | io_cq->qid = ctx->qid; |
1832 | |
1833 | io_cq->msix_vector = ctx->msix_vector; |
1834 | |
1835 | io_sq->q_depth = ctx->queue_size; |
1836 | io_sq->direction = ctx->direction; |
1837 | io_sq->qid = ctx->qid; |
1838 | |
1839 | io_sq->mem_queue_type = ctx->mem_queue_type; |
1840 | |
1841 | if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) |
1842 | /* header length is limited to 8 bits */ |
1843 | io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256); |
1844 | |
1845 | ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); |
1846 | if (ret) |
1847 | goto error; |
1848 | ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); |
1849 | if (ret) |
1850 | goto error; |
1851 | |
1852 | ret = ena_com_create_io_cq(ena_dev, io_cq); |
1853 | if (ret) |
1854 | goto error; |
1855 | |
ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1857 | if (ret) |
1858 | goto destroy_io_cq; |
1859 | |
1860 | return 0; |
1861 | |
1862 | destroy_io_cq: |
1863 | ena_com_destroy_io_cq(ena_dev, io_cq); |
1864 | error: |
1865 | ena_com_io_queue_free(ena_dev, io_sq, io_cq); |
1866 | return ret; |
1867 | } |
1868 | |
1869 | void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) |
1870 | { |
1871 | struct ena_com_io_sq *io_sq; |
1872 | struct ena_com_io_cq *io_cq; |
1873 | |
1874 | if (qid >= ENA_TOTAL_NUM_QUEUES) { |
netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
1876 | qid, ENA_TOTAL_NUM_QUEUES); |
1877 | return; |
1878 | } |
1879 | |
1880 | io_sq = &ena_dev->io_sq_queues[qid]; |
1881 | io_cq = &ena_dev->io_cq_queues[qid]; |
1882 | |
1883 | ena_com_destroy_io_sq(ena_dev, io_sq); |
1884 | ena_com_destroy_io_cq(ena_dev, io_cq); |
1885 | |
1886 | ena_com_io_queue_free(ena_dev, io_sq, io_cq); |
1887 | } |
1888 | |
1889 | int ena_com_get_link_params(struct ena_com_dev *ena_dev, |
1890 | struct ena_admin_get_feat_resp *resp) |
1891 | { |
return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1893 | } |
1894 | |
1895 | int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, |
1896 | struct ena_com_dev_get_features_ctx *get_feat_ctx) |
1897 | { |
1898 | struct ena_admin_get_feat_resp get_resp; |
1899 | int rc; |
1900 | |
rc = ena_com_get_feature(ena_dev, &get_resp,
ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1903 | if (rc) |
1904 | return rc; |
1905 | |
1906 | memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, |
1907 | sizeof(get_resp.u.dev_attr)); |
1908 | |
1909 | ena_dev->supported_features = get_resp.u.dev_attr.supported_features; |
1910 | ena_dev->capabilities = get_resp.u.dev_attr.capabilities; |
1911 | |
1912 | if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { |
rc = ena_com_get_feature(ena_dev, &get_resp,
ENA_ADMIN_MAX_QUEUES_EXT,
ENA_FEATURE_MAX_QUEUE_EXT_VER);
1916 | if (rc) |
1917 | return rc; |
1918 | |
1919 | if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER) |
1920 | return -EINVAL; |
1921 | |
1922 | memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext, |
1923 | sizeof(get_resp.u.max_queue_ext)); |
1924 | ena_dev->tx_max_header_size = |
1925 | get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size; |
1926 | } else { |
rc = ena_com_get_feature(ena_dev, &get_resp,
ENA_ADMIN_MAX_QUEUES_NUM, 0);
if (rc)
return rc;

memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
sizeof(get_resp.u.max_queue));
ena_dev->tx_max_header_size =
get_resp.u.max_queue.max_header_size;
1936 | } |
1937 | |
rc = ena_com_get_feature(ena_dev, &get_resp,
ENA_ADMIN_AENQ_CONFIG, 0);
1940 | if (rc) |
1941 | return rc; |
1942 | |
1943 | memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, |
1944 | sizeof(get_resp.u.aenq)); |
1945 | |
rc = ena_com_get_feature(ena_dev, &get_resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1948 | if (rc) |
1949 | return rc; |
1950 | |
1951 | memcpy(&get_feat_ctx->offload, &get_resp.u.offload, |
1952 | sizeof(get_resp.u.offload)); |
1953 | |
/* The driver-hints admin command isn't mandatory, so if the
* command isn't supported, set the driver hints to 0
*/
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1958 | |
1959 | if (!rc) |
1960 | memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints)); |
1961 | else if (rc == -EOPNOTSUPP) |
1962 | memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints)); |
1963 | else |
1964 | return rc; |
1965 | |
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
1967 | if (!rc) |
1968 | memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq)); |
1969 | else if (rc == -EOPNOTSUPP) |
1970 | memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); |
1971 | else |
1972 | return rc; |
1973 | |
1974 | return 0; |
1975 | } |
1976 | |
1977 | void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) |
1978 | { |
ena_com_handle_admin_completion(&ena_dev->admin_queue);
1980 | } |
1981 | |
1982 | /* ena_handle_specific_aenq_event: |
1983 | * return the handler that is relevant to the specific event group |
1984 | */ |
1985 | static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev, |
1986 | u16 group) |
1987 | { |
1988 | struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers; |
1989 | |
1990 | if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) |
1991 | return aenq_handlers->handlers[group]; |
1992 | |
1993 | return aenq_handlers->unimplemented_handler; |
1994 | } |
1995 | |
1996 | /* ena_aenq_intr_handler: |
1997 | * handles the aenq incoming events. |
1998 | * pop events from the queue and apply the specific handler |
1999 | */ |
2000 | void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) |
2001 | { |
2002 | struct ena_admin_aenq_entry *aenq_e; |
2003 | struct ena_admin_aenq_common_desc *aenq_common; |
2004 | struct ena_com_aenq *aenq = &ena_dev->aenq; |
2005 | u64 timestamp; |
2006 | ena_aenq_handler handler_cb; |
2007 | u16 masked_head, processed = 0; |
2008 | u8 phase; |
2009 | |
2010 | masked_head = aenq->head & (aenq->q_depth - 1); |
2011 | phase = aenq->phase; |
2012 | aenq_e = &aenq->entries[masked_head]; /* Get first entry */ |
2013 | aenq_common = &aenq_e->aenq_common_desc; |
2014 | |
2015 | /* Go over all the events */ |
2016 | while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { |
2017 | /* Make sure the phase bit (ownership) is as expected before |
2018 | * reading the rest of the descriptor. |
2019 | */ |
2020 | dma_rmb(); |
2021 | |
2022 | timestamp = (u64)aenq_common->timestamp_low | |
2023 | ((u64)aenq_common->timestamp_high << 32); |
2024 | |
netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2026 | aenq_common->group, aenq_common->syndrome, timestamp); |
2027 | |
/* Handle specific event */
handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
aenq_common->group);
handler_cb(data, aenq_e); /* call the actual event handler */
2032 | |
2033 | /* Get next event entry */ |
2034 | masked_head++; |
2035 | processed++; |
2036 | |
2037 | if (unlikely(masked_head == aenq->q_depth)) { |
2038 | masked_head = 0; |
2039 | phase = !phase; |
2040 | } |
2041 | aenq_e = &aenq->entries[masked_head]; |
2042 | aenq_common = &aenq_e->aenq_common_desc; |
2043 | } |
2044 | |
2045 | aenq->head += processed; |
2046 | aenq->phase = phase; |
2047 | |
2048 | /* Don't update aenq doorbell if there weren't any processed events */ |
2049 | if (!processed) |
2050 | return; |
2051 | |
2052 | /* write the aenq doorbell after all AENQ descriptors were read */ |
2053 | mb(); |
2054 | writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); |
2055 | } |
2056 | |
2057 | int ena_com_dev_reset(struct ena_com_dev *ena_dev, |
2058 | enum ena_regs_reset_reason_types reset_reason) |
2059 | { |
2060 | u32 stat, timeout, cap, reset_val; |
2061 | int rc; |
2062 | |
2063 | stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); |
2064 | cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); |
2065 | |
2066 | if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) { |
netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
2068 | return -ETIME; |
2069 | } |
2070 | |
2071 | if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { |
netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
2073 | return -EINVAL; |
2074 | } |
2075 | |
2076 | timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> |
2077 | ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; |
2078 | if (timeout == 0) { |
netdev_err(ena_dev->net_device, "Invalid timeout value\n");
2080 | return -EINVAL; |
2081 | } |
2082 | |
2083 | /* start reset */ |
2084 | reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; |
2085 | reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) & |
2086 | ENA_REGS_DEV_CTL_RESET_REASON_MASK; |
writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2088 | |
2089 | /* Write again the MMIO read request address */ |
2090 | ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); |
2091 | |
2092 | rc = wait_for_reset_state(ena_dev, timeout, |
2093 | ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); |
2094 | if (rc != 0) { |
netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
2096 | return rc; |
2097 | } |
2098 | |
2099 | /* reset done */ |
writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
2102 | if (rc != 0) { |
netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
2104 | return rc; |
2105 | } |
2106 | |
2107 | timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> |
2108 | ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; |
2109 | if (timeout) |
2110 | /* the resolution of timeout reg is 100ms */ |
2111 | ena_dev->admin_queue.completion_timeout = timeout * 100000; |
2112 | else |
2113 | ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; |
2114 | |
2115 | return 0; |
2116 | } |
2117 | |
2118 | static int ena_get_dev_stats(struct ena_com_dev *ena_dev, |
2119 | struct ena_com_stats_ctx *ctx, |
2120 | enum ena_admin_get_stats_type type) |
2121 | { |
2122 | struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; |
2123 | struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; |
2124 | struct ena_com_admin_queue *admin_queue; |
2125 | int ret; |
2126 | |
2127 | admin_queue = &ena_dev->admin_queue; |
2128 | |
2129 | get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; |
2130 | get_cmd->aq_common_descriptor.flags = 0; |
2131 | get_cmd->type = type; |
2132 | |
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)get_cmd,
sizeof(*get_cmd),
(struct ena_admin_acq_entry *)get_resp,
sizeof(*get_resp));
2138 | |
2139 | if (unlikely(ret)) |
netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
2141 | |
2142 | return ret; |
2143 | } |
2144 | |
2145 | int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, |
2146 | struct ena_admin_eni_stats *stats) |
2147 | { |
2148 | struct ena_com_stats_ctx ctx; |
2149 | int ret; |
2150 | |
if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
ENA_ADMIN_ENI_STATS);
2154 | return -EOPNOTSUPP; |
2155 | } |
2156 | |
2157 | memset(&ctx, 0x0, sizeof(ctx)); |
ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2159 | if (likely(ret == 0)) |
2160 | memcpy(stats, &ctx.get_resp.u.eni_stats, |
2161 | sizeof(ctx.get_resp.u.eni_stats)); |
2162 | |
2163 | return ret; |
2164 | } |
2165 | |
2166 | int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, |
2167 | struct ena_admin_basic_stats *stats) |
2168 | { |
2169 | struct ena_com_stats_ctx ctx; |
2170 | int ret; |
2171 | |
2172 | memset(&ctx, 0x0, sizeof(ctx)); |
ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2174 | if (likely(ret == 0)) |
2175 | memcpy(stats, &ctx.get_resp.u.basic_stats, |
2176 | sizeof(ctx.get_resp.u.basic_stats)); |
2177 | |
2178 | return ret; |
2179 | } |
2180 | |
2181 | int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu) |
2182 | { |
2183 | struct ena_com_admin_queue *admin_queue; |
2184 | struct ena_admin_set_feat_cmd cmd; |
2185 | struct ena_admin_set_feat_resp resp; |
2186 | int ret; |
2187 | |
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
2190 | return -EOPNOTSUPP; |
2191 | } |
2192 | |
2193 | memset(&cmd, 0x0, sizeof(cmd)); |
2194 | admin_queue = &ena_dev->admin_queue; |
2195 | |
2196 | cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; |
2197 | cmd.aq_common_descriptor.flags = 0; |
2198 | cmd.feat_common.feature_id = ENA_ADMIN_MTU; |
2199 | cmd.u.mtu.mtu = mtu; |
2200 | |
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
2206 | |
2207 | if (unlikely(ret)) |
netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
2209 | |
2210 | return ret; |
2211 | } |
2212 | |
2213 | int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, |
2214 | struct ena_admin_feature_offload_desc *offload) |
2215 | { |
2216 | int ret; |
2217 | struct ena_admin_get_feat_resp resp; |
2218 | |
ret = ena_com_get_feature(ena_dev, &resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2221 | if (unlikely(ret)) { |
netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
2223 | return ret; |
2224 | } |
2225 | |
2226 | memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); |
2227 | |
2228 | return 0; |
2229 | } |
2230 | |
2231 | int ena_com_set_hash_function(struct ena_com_dev *ena_dev) |
2232 | { |
2233 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
struct ena_rss *rss = &ena_dev->rss;
2235 | struct ena_admin_set_feat_cmd cmd; |
2236 | struct ena_admin_set_feat_resp resp; |
2237 | struct ena_admin_get_feat_resp get_resp; |
2238 | int ret; |
2239 | |
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
2243 | return -EOPNOTSUPP; |
2244 | } |
2245 | |
2246 | /* Validate hash function is supported */ |
ret = ena_com_get_feature(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2249 | if (unlikely(ret)) |
2250 | return ret; |
2251 | |
2252 | if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { |
netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
2254 | rss->hash_func); |
2255 | return -EOPNOTSUPP; |
2256 | } |
2257 | |
2258 | memset(&cmd, 0x0, sizeof(cmd)); |
2259 | |
2260 | cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; |
2261 | cmd.aq_common_descriptor.flags = |
2262 | ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; |
2263 | cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; |
2264 | cmd.u.flow_hash_func.init_val = rss->hash_init_val; |
2265 | cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; |
2266 | |
ret = ena_com_mem_addr_set(ena_dev,
&cmd.control_buffer.address,
rss->hash_key_dma_addr);
2270 | if (unlikely(ret)) { |
netdev_err(ena_dev->net_device, "Memory address set failed\n");
2272 | return ret; |
2273 | } |
2274 | |
2275 | cmd.control_buffer.length = sizeof(*rss->hash_key); |
2276 | |
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
2282 | if (unlikely(ret)) { |
netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
2284 | rss->hash_func, ret); |
2285 | return -EINVAL; |
2286 | } |
2287 | |
2288 | return 0; |
2289 | } |
2290 | |
2291 | int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, |
2292 | enum ena_admin_hash_functions func, |
2293 | const u8 *key, u16 key_len, u32 init_val) |
2294 | { |
2295 | struct ena_admin_feature_rss_flow_hash_control *hash_key; |
2296 | struct ena_admin_get_feat_resp get_resp; |
2297 | enum ena_admin_hash_functions old_func; |
struct ena_rss *rss = &ena_dev->rss;
2299 | int rc; |
2300 | |
2301 | hash_key = rss->hash_key; |
2302 | |
2303 | /* Make sure size is a mult of DWs */ |
2304 | if (unlikely(key_len & 0x3)) |
2305 | return -EINVAL; |
2306 | |
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
rss->hash_key_dma_addr,
sizeof(*rss->hash_key), 0);
2311 | if (unlikely(rc)) |
2312 | return rc; |
2313 | |
2314 | if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { |
netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
2316 | return -EOPNOTSUPP; |
2317 | } |
2318 | |
2319 | if ((func == ENA_ADMIN_TOEPLITZ) && key) { |
2320 | if (key_len != sizeof(hash_key->key)) { |
netdev_err(ena_dev->net_device,
"key len (%u) doesn't equal the supported size (%zu)\n", key_len,
2323 | sizeof(hash_key->key)); |
2324 | return -EINVAL; |
2325 | } |
2326 | memcpy(hash_key->key, key, key_len); |
2327 | hash_key->key_parts = key_len / sizeof(hash_key->key[0]); |
2328 | } |
2329 | |
2330 | rss->hash_init_val = init_val; |
2331 | old_func = rss->hash_func; |
2332 | rss->hash_func = func; |
2333 | rc = ena_com_set_hash_function(ena_dev); |
2334 | |
2335 | /* Restore the old function */ |
2336 | if (unlikely(rc)) |
2337 | rss->hash_func = old_func; |
2338 | |
2339 | return rc; |
2340 | } |
2341 | |
2342 | int ena_com_get_hash_function(struct ena_com_dev *ena_dev, |
2343 | enum ena_admin_hash_functions *func) |
2344 | { |
struct ena_rss *rss = &ena_dev->rss;
2346 | struct ena_admin_get_feat_resp get_resp; |
2347 | int rc; |
2348 | |
2349 | if (unlikely(!func)) |
2350 | return -EINVAL; |
2351 | |
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
rss->hash_key_dma_addr,
sizeof(*rss->hash_key), 0);
2356 | if (unlikely(rc)) |
2357 | return rc; |
2358 | |
2359 | /* ffs() returns 1 in case the lsb is set */ |
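/* Worked example (illustrative): if the device reports
* selected_func == BIT(ENA_ADMIN_TOEPLITZ), ffs() returns
* ENA_ADMIN_TOEPLITZ + 1 and the decrement below recovers the enum value.
*/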
2360 | rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func); |
2361 | if (rss->hash_func) |
2362 | rss->hash_func--; |
2363 | |
2364 | *func = rss->hash_func; |
2365 | |
2366 | return 0; |
2367 | } |
2368 | |
2369 | int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key) |
2370 | { |
2371 | struct ena_admin_feature_rss_flow_hash_control *hash_key = |
2372 | ena_dev->rss.hash_key; |
2373 | |
2374 | if (key) |
2375 | memcpy(key, hash_key->key, |
2376 | (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0])); |
2377 | |
2378 | return 0; |
2379 | } |
2380 | |
2381 | int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, |
2382 | enum ena_admin_flow_hash_proto proto, |
2383 | u16 *fields) |
2384 | { |
struct ena_rss *rss = &ena_dev->rss;
2386 | struct ena_admin_get_feat_resp get_resp; |
2387 | int rc; |
2388 | |
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_INPUT,
rss->hash_ctrl_dma_addr,
sizeof(*rss->hash_ctrl), 0);
2393 | if (unlikely(rc)) |
2394 | return rc; |
2395 | |
2396 | if (fields) |
2397 | *fields = rss->hash_ctrl->selected_fields[proto].fields; |
2398 | |
2399 | return 0; |
2400 | } |
2401 | |
2402 | int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) |
2403 | { |
2404 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
struct ena_rss *rss = &ena_dev->rss;
2406 | struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; |
2407 | struct ena_admin_set_feat_cmd cmd; |
2408 | struct ena_admin_set_feat_resp resp; |
2409 | int ret; |
2410 | |
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
2414 | return -EOPNOTSUPP; |
2415 | } |
2416 | |
2417 | memset(&cmd, 0x0, sizeof(cmd)); |
2418 | |
2419 | cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; |
2420 | cmd.aq_common_descriptor.flags = |
2421 | ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; |
2422 | cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT; |
2423 | cmd.u.flow_hash_input.enabled_input_sort = |
2424 | ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK | |
2425 | ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; |
2426 | |
ret = ena_com_mem_addr_set(ena_dev,
&cmd.control_buffer.address,
rss->hash_ctrl_dma_addr);
2430 | if (unlikely(ret)) { |
netdev_err(ena_dev->net_device, "Memory address set failed\n");
2432 | return ret; |
2433 | } |
2434 | cmd.control_buffer.length = sizeof(*hash_ctrl); |
2435 | |
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
2441 | if (unlikely(ret)) |
netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
2443 | |
2444 | return ret; |
2445 | } |
2446 | |
2447 | int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) |
2448 | { |
struct ena_rss *rss = &ena_dev->rss;
2450 | struct ena_admin_feature_rss_hash_control *hash_ctrl = |
2451 | rss->hash_ctrl; |
2452 | u16 available_fields = 0; |
2453 | int rc, i; |
2454 | |
2455 | /* Get the supported hash input */ |
rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2457 | if (unlikely(rc)) |
2458 | return rc; |
2459 | |
2460 | hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = |
2461 | ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | |
2462 | ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; |
2463 | |
2464 | hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = |
2465 | ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | |
2466 | ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; |
2467 | |
2468 | hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = |
2469 | ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | |
2470 | ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; |
2471 | |
2472 | hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = |
2473 | ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | |
2474 | ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; |
2475 | |
2476 | hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = |
2477 | ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; |
2478 | |
2479 | hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = |
2480 | ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; |
2481 | |
2482 | hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = |
2483 | ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; |
2484 | |
2485 | hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = |
2486 | ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; |
2487 | |
2488 | for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { |
2489 | available_fields = hash_ctrl->selected_fields[i].fields & |
2490 | hash_ctrl->supported_fields[i].fields; |
2491 | if (available_fields != hash_ctrl->selected_fields[i].fields) { |
netdev_err(ena_dev->net_device,
"Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2494 | i, hash_ctrl->supported_fields[i].fields, |
2495 | hash_ctrl->selected_fields[i].fields); |
2496 | return -EOPNOTSUPP; |
2497 | } |
2498 | } |
2499 | |
2500 | rc = ena_com_set_hash_ctrl(ena_dev); |
2501 | |
2502 | /* In case of failure, restore the old hash ctrl */ |
2503 | if (unlikely(rc)) |
ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2505 | |
2506 | return rc; |
2507 | } |
2508 | |
2509 | int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, |
2510 | enum ena_admin_flow_hash_proto proto, |
2511 | u16 hash_fields) |
2512 | { |
struct ena_rss *rss = &ena_dev->rss;
2514 | struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; |
2515 | u16 supported_fields; |
2516 | int rc; |
2517 | |
2518 | if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { |
netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
2520 | return -EINVAL; |
2521 | } |
2522 | |
2523 | /* Get the ctrl table */ |
2524 | rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); |
2525 | if (unlikely(rc)) |
2526 | return rc; |
2527 | |
2528 | /* Make sure all the fields are supported */ |
2529 | supported_fields = hash_ctrl->supported_fields[proto].fields; |
2530 | if ((hash_fields & supported_fields) != hash_fields) { |
netdev_err(ena_dev->net_device,
"Proto %d doesn't support the required fields %x. supports only: %x\n",
2533 | proto, hash_fields, supported_fields); |
2534 | } |
2535 | |
2536 | hash_ctrl->selected_fields[proto].fields = hash_fields; |
2537 | |
2538 | rc = ena_com_set_hash_ctrl(ena_dev); |
2539 | |
2540 | /* In case of failure, restore the old hash ctrl */ |
2541 | if (unlikely(rc)) |
ena_com_get_hash_ctrl(ena_dev, 0, NULL);

return rc;
2545 | } |
2546 | |
2547 | int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, |
2548 | u16 entry_idx, u16 entry_value) |
2549 | { |
struct ena_rss *rss = &ena_dev->rss;
2551 | |
2552 | if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) |
2553 | return -EINVAL; |
2554 | |
if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2556 | return -EINVAL; |
2557 | |
2558 | rss->host_rss_ind_tbl[entry_idx] = entry_value; |
2559 | |
2560 | return 0; |
2561 | } |
2562 | |
2563 | int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) |
2564 | { |
2565 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
struct ena_rss *rss = &ena_dev->rss;
2567 | struct ena_admin_set_feat_cmd cmd; |
2568 | struct ena_admin_set_feat_resp resp; |
2569 | int ret; |
2570 | |
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
2574 | return -EOPNOTSUPP; |
2575 | } |
2576 | |
2577 | ret = ena_com_ind_tbl_convert_to_device(ena_dev); |
2578 | if (ret) { |
netdev_err(ena_dev->net_device,
"Failed to convert host indirection table to device table\n");
2581 | return ret; |
2582 | } |
2583 | |
2584 | memset(&cmd, 0x0, sizeof(cmd)); |
2585 | |
2586 | cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; |
2587 | cmd.aq_common_descriptor.flags = |
2588 | ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; |
2589 | cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG; |
2590 | cmd.u.ind_table.size = rss->tbl_log_size; |
2591 | cmd.u.ind_table.inline_index = 0xFFFFFFFF; |
2592 | |
ret = ena_com_mem_addr_set(ena_dev,
&cmd.control_buffer.address,
rss->rss_ind_tbl_dma_addr);
2596 | if (unlikely(ret)) { |
netdev_err(ena_dev->net_device, "Memory address set failed\n");
2598 | return ret; |
2599 | } |
2600 | |
2601 | cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * |
2602 | sizeof(struct ena_admin_rss_ind_table_entry); |
2603 | |
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
2609 | |
2610 | if (unlikely(ret)) |
netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
2612 | |
2613 | return ret; |
2614 | } |
2615 | |
2616 | int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) |
2617 | { |
struct ena_rss *rss = &ena_dev->rss;
2619 | struct ena_admin_get_feat_resp get_resp; |
2620 | u32 tbl_size; |
2621 | int i, rc; |
2622 | |
2623 | tbl_size = (1ULL << rss->tbl_log_size) * |
2624 | sizeof(struct ena_admin_rss_ind_table_entry); |
2625 | |
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
rss->rss_ind_tbl_dma_addr,
tbl_size, 0);
2630 | if (unlikely(rc)) |
2631 | return rc; |
2632 | |
2633 | if (!ind_tbl) |
2634 | return 0; |
2635 | |
2636 | for (i = 0; i < (1 << rss->tbl_log_size); i++) |
2637 | ind_tbl[i] = rss->host_rss_ind_tbl[i]; |
2638 | |
2639 | return 0; |
2640 | } |
2641 | |
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2643 | { |
2644 | int rc; |
2645 | |
2646 | memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); |
2647 | |
rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2649 | if (unlikely(rc)) |
2650 | goto err_indr_tbl; |
2651 | |
2652 | /* The following function might return unsupported in case the |
2653 | * device doesn't support setting the key / hash function. We can safely |
2654 | * ignore this error and have indirection table support only. |
2655 | */ |
2656 | rc = ena_com_hash_key_allocate(ena_dev); |
2657 | if (likely(!rc)) |
2658 | ena_com_hash_key_fill_default_key(ena_dev); |
2659 | else if (rc != -EOPNOTSUPP) |
2660 | goto err_hash_key; |
2661 | |
2662 | rc = ena_com_hash_ctrl_init(ena_dev); |
2663 | if (unlikely(rc)) |
2664 | goto err_hash_ctrl; |
2665 | |
2666 | return 0; |
2667 | |
2668 | err_hash_ctrl: |
2669 | ena_com_hash_key_destroy(ena_dev); |
2670 | err_hash_key: |
2671 | ena_com_indirect_table_destroy(ena_dev); |
err_indr_tbl:
return rc;
2675 | } |
2676 | |
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2678 | { |
2679 | ena_com_indirect_table_destroy(ena_dev); |
2680 | ena_com_hash_key_destroy(ena_dev); |
2681 | ena_com_hash_ctrl_destroy(ena_dev); |
2682 | |
2683 | memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); |
2684 | } |
2685 | |
2686 | int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) |
2687 | { |
2688 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; |
2689 | |
host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
&host_attr->host_info_dma_addr, GFP_KERNEL);
2692 | if (unlikely(!host_attr->host_info)) |
2693 | return -ENOMEM; |
2694 | |
2695 | host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << |
2696 | ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | |
2697 | (ENA_COMMON_SPEC_VERSION_MINOR)); |
2698 | |
2699 | return 0; |
2700 | } |
2701 | |
2702 | int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, |
2703 | u32 debug_area_size) |
2704 | { |
2705 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; |
2706 | |
2707 | host_attr->debug_area_virt_addr = |
dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
&host_attr->debug_area_dma_addr, GFP_KERNEL);
2710 | if (unlikely(!host_attr->debug_area_virt_addr)) { |
2711 | host_attr->debug_area_size = 0; |
2712 | return -ENOMEM; |
2713 | } |
2714 | |
2715 | host_attr->debug_area_size = debug_area_size; |
2716 | |
2717 | return 0; |
2718 | } |
2719 | |
2720 | void ena_com_delete_host_info(struct ena_com_dev *ena_dev) |
2721 | { |
2722 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; |
2723 | |
2724 | if (host_attr->host_info) { |
dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
host_attr->host_info_dma_addr);
2727 | host_attr->host_info = NULL; |
2728 | } |
2729 | } |
2730 | |
2731 | void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) |
2732 | { |
2733 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; |
2734 | |
2735 | if (host_attr->debug_area_virt_addr) { |
dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
2738 | host_attr->debug_area_virt_addr = NULL; |
2739 | } |
2740 | } |
2741 | |
2742 | int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) |
2743 | { |
2744 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; |
2745 | struct ena_com_admin_queue *admin_queue; |
2746 | struct ena_admin_set_feat_cmd cmd; |
struct ena_admin_set_feat_resp resp;
int ret;
2750 | |
2751 | /* Host attribute config is called before ena_com_get_dev_attr_feat |
2752 | * so ena_com can't check if the feature is supported. |
2753 | */ |
2754 | |
2755 | memset(&cmd, 0x0, sizeof(cmd)); |
2756 | admin_queue = &ena_dev->admin_queue; |
2757 | |
2758 | cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; |
2759 | cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; |
2760 | |
ret = ena_com_mem_addr_set(ena_dev,
&cmd.u.host_attr.debug_ba,
host_attr->debug_area_dma_addr);
2764 | if (unlikely(ret)) { |
netdev_err(ena_dev->net_device, "Memory address set failed\n");
2766 | return ret; |
2767 | } |
2768 | |
ret = ena_com_mem_addr_set(ena_dev,
&cmd.u.host_attr.os_info_ba,
host_attr->host_info_dma_addr);
2772 | if (unlikely(ret)) { |
netdev_err(ena_dev->net_device, "Memory address set failed\n");
2774 | return ret; |
2775 | } |
2776 | |
2777 | cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; |
2778 | |
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
2784 | |
2785 | if (unlikely(ret)) |
netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
2787 | |
2788 | return ret; |
2789 | } |
2790 | |
2791 | /* Interrupt moderation */ |
2792 | bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) |
2793 | { |
return ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_INTERRUPT_MODERATION);
2796 | } |
2797 | |
2798 | static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev, |
2799 | u32 coalesce_usecs, |
2800 | u32 intr_delay_resolution, |
2801 | u32 *intr_moder_interval) |
2802 | { |
2803 | if (!intr_delay_resolution) { |
netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
2805 | return -EFAULT; |
2806 | } |
2807 | |
2808 | *intr_moder_interval = coalesce_usecs / intr_delay_resolution; |
2809 | |
2810 | return 0; |
2811 | } |
2812 | |
2813 | int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, |
2814 | u32 tx_coalesce_usecs) |
2815 | { |
return ena_com_update_nonadaptive_moderation_interval(ena_dev,
tx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_tx_interval);
2820 | } |
2821 | |
2822 | int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, |
2823 | u32 rx_coalesce_usecs) |
2824 | { |
return ena_com_update_nonadaptive_moderation_interval(ena_dev,
rx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_rx_interval);
2829 | } |
2830 | |
2831 | int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) |
2832 | { |
2833 | struct ena_admin_get_feat_resp get_resp; |
2834 | u16 delay_resolution; |
2835 | int rc; |
2836 | |
rc = ena_com_get_feature(ena_dev, &get_resp,
ENA_ADMIN_INTERRUPT_MODERATION, 0);
2839 | |
2840 | if (rc) { |
2841 | if (rc == -EOPNOTSUPP) { |
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2843 | ENA_ADMIN_INTERRUPT_MODERATION); |
2844 | rc = 0; |
2845 | } else { |
netdev_err(ena_dev->net_device,
"Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
2848 | } |
2849 | |
2850 | /* no moderation supported, disable adaptive support */ |
2851 | ena_com_disable_adaptive_moderation(ena_dev); |
2852 | return rc; |
2853 | } |
2854 | |
/* Moderation is supported by the device; capture its delay resolution */
2856 | delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; |
ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2858 | |
2859 | /* Disable adaptive moderation by default - can be enabled later */ |
2860 | ena_com_disable_adaptive_moderation(ena_dev); |
2861 | |
2862 | return 0; |
2863 | } |
2864 | |
2865 | unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) |
2866 | { |
2867 | return ena_dev->intr_moder_tx_interval; |
2868 | } |
2869 | |
2870 | unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) |
2871 | { |
2872 | return ena_dev->intr_moder_rx_interval; |
2873 | } |
2874 | |
2875 | int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, |
2876 | struct ena_admin_feature_llq_desc *llq_features, |
2877 | struct ena_llq_configurations *llq_default_cfg) |
2878 | { |
2879 | struct ena_com_llq_info *llq_info = &ena_dev->llq_info; |
2880 | int rc; |
2881 | |
2882 | if (!llq_features->max_llq_num) { |
2883 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
2884 | return 0; |
2885 | } |
2886 | |
2887 | rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); |
2888 | if (rc) |
2889 | return rc; |
2890 | |
2891 | ena_dev->tx_max_header_size = llq_info->desc_list_entry_size - |
2892 | (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); |
2893 | |
2894 | if (unlikely(ena_dev->tx_max_header_size == 0)) { |
netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
2896 | return -EINVAL; |
2897 | } |
2898 | |
2899 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; |
2900 | |
2901 | return 0; |
2902 | } |
2903 | |