/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018-2021, Mellanox Technologies inc. All rights reserved. */

#ifndef __LIB_MLX5_EQ_H__
#define __LIB_MLX5_EQ_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cq.h>

#define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))

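/* Per-EQ tasklet context used to batch CQ completion callbacks:
 * CQs with pending completions are queued on @list from the EQ
 * interrupt handler; mlx5_cq_tasklet_cb() (declared below) moves
 * them to @process_list under @lock and runs their completion
 * handlers in tasklet (softirq) context.
 */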
struct mlx5_eq_tasklet {
	struct list_head list;
	struct list_head process_list;
	struct tasklet_struct task;
	spinlock_t lock; /* lock completion tasklet list */
};

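/* CQN -> struct mlx5_core_cq lookup table, used by the EQ handler
 * to dispatch completion events to the right CQ.
 */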
struct mlx5_cq_table {
	spinlock_t lock; /* protect radix tree */
	struct radix_tree_root tree;
};

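/* Core EQ state: @fbc/@frag_buf describe the fragmented ring of
 * EQEs consumed at @cons_index; @doorbell is written via
 * eq_update_ci() to publish the consumer index and optionally
 * re-arm the EQ (@eqn) on its backing interrupt (@irq/@irqn).
 */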
struct mlx5_eq {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	struct mlx5_core_dev *dev;
	struct mlx5_cq_table cq_table;
	__be32 __iomem *doorbell;
	u32 cons_index;
	unsigned int vecidx;
	unsigned int irqn;
	u8 eqn;
	struct mlx5_rsc_debug *dbg;
	struct mlx5_irq *irq;
};

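/* Async (control path) EQ: events are delivered through @irq_nb in
 * IRQ context, and @lock keeps the IRQ handler from racing with
 * the recovery (resiliency) flows that poll the same EQ.
 */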
struct mlx5_eq_async {
	struct mlx5_eq core;
	struct notifier_block irq_nb;
	spinlock_t lock; /* To avoid irq EQ handle races with resiliency flows */
};

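/* Completion EQ: @irq_nb handles the interrupt, CQ completion
 * callbacks may be deferred to @tasklet_ctx, and @list links the
 * EQ into the device's list of completion EQs.
 */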
struct mlx5_eq_comp {
	struct mlx5_eq core;
	struct notifier_block irq_nb;
	struct mlx5_eq_tasklet tasklet_ctx;
	struct list_head list;
};

static inline u32 eq_get_size(struct mlx5_eq *eq)
{
	return eq->fbc.sz_m1 + 1;
}

static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_frag_buf_get_wqe(&eq->fbc, entry);
}

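/* An EQE is owned by software once its ownership bit matches the
 * parity of the consumer's pass count over the ring
 * (cons_index >> log_sz); hardware flips the bit it writes on each
 * pass, so a mismatch means the entry has not been written yet.
 */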
static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & eq->fbc.sz_m1);

	return (eqe->owner ^ (eq->cons_index >> eq->fbc.log_sz)) & 1 ? NULL : eqe;
}

static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
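
/* Illustrative sketch only (not part of this header): the EQ
 * interrupt handlers in eq.c drain events in roughly this pattern,
 * where the dma_rmb() keeps the EQE payload read from being
 * reordered before the ownership check, and the final armed
 * doorbell publishes the new consumer index while requesting the
 * next interrupt:
 *
 *	struct mlx5_eqe *eqe;
 *
 *	while ((eqe = next_eqe_sw(eq))) {
 *		dma_rmb();
 *		// ... dispatch on eqe->type ...
 *		++eq->cons_index;
 *	}
 *	eq_update_ci(eq, 1);
 */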
77
78int mlx5_eq_table_init(struct mlx5_core_dev *dev);
79void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
80int mlx5_eq_table_create(struct mlx5_core_dev *dev);
81void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);
82
83int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
84void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
85struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
86struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
87void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
88
89u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
90void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev);
91void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
92void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);
93
94int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
95void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
96void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
97void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
98
99/* This function should only be called after mlx5_cmd_force_teardown_hca */
100void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
101
102#ifdef CONFIG_RFS_ACCEL
103struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
104#endif
105
106int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
107
108#endif
109
