// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include "mcs.h"
#include "mcs_reg.h"

static struct mcs_ops cnf10kb_mcs_ops = {
	.mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
	.mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
	.mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
	.mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
	.mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
	.mcs_bbe_intr_handler = cnf10kb_mcs_bbe_intr_handler,
	.mcs_pab_intr_handler = cnf10kb_mcs_pab_intr_handler,
};

struct mcs_ops *cnf10kb_get_mac_ops(void)
{
	return &cnf10kb_mcs_ops;
}

void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
	struct hwinfo *hw = mcs->hw;

	hw->tcam_entries = 64;		/* TCAM entries */
	hw->secy_entries = 64;		/* SecY entries */
	hw->sc_entries = 64;		/* SC CAM entries */
	hw->sa_entries = 128;		/* SA entries */
	hw->lmac_cnt = 4;		/* lmacs/ports per mcs block */
	hw->mcs_x2p_intf = 1;		/* x2p calibration intf */
	hw->mcs_blks = 7;		/* MCS blocks */
	hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
}

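/* Program the parser with the custom VLAN tag EtherTypes (CTAG 0x8100 in
 * slot 0, STAG 0x88a8 in slot 1) for both RX and TX, then enable matching
 * on custom tags 0 and 1 and on the MACsec SecTag.
 */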
void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
{
	u64 reg, val;

	/* VLAN Ctag */
	val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);

	reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
	mcs_reg_write(mcs, reg, val);

	/* VLAN STag */
	val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);

	/* RX */
	reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
	mcs_reg_write(mcs, reg, val);

	/* TX */
	reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
	mcs_reg_write(mcs, reg, val);

	/* Enable custom tags 0 and 1 and sectag */
	val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);

	reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
	mcs_reg_write(mcs, reg, val);
}

void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
{
	u64 reg, val;

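	/* SECY_MAP_MEM packs the SecY index in bits [5:0] and the
	 * control-packet flag in bit 6. On TX, the SCI is written to
	 * SECY_MAP_MEM_0 and the SC index goes into bits [12:7] of
	 * SECY_MAP_MEM_1.
	 */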
	val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
	} else {
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
		mcs_reg_write(mcs, reg, map->sci);
		val |= (map->sc & 0x3F) << 7;
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
	}

	mcs_reg_write(mcs, reg, val);
}

void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
{
	u64 reg, val;

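	/* Each TX SC maps two SAs: SA index0 in bits [6:0] and
	 * SA index1 in bits [13:7] of SA_MAP_MEM_0.
	 */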
	val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;

	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
	val = mcs_reg_read(mcs, reg);

	if (map->rekey_ena)
		val |= BIT_ULL(map->sc_id);
	else
		val &= ~BIT_ULL(map->sc_id);

	mcs_reg_write(mcs, reg, val);

	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);

	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
}

void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
{
	u64 val, reg;

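	/* RX SAs are indexed per (SC, AN) pair: the SA map register index is
	 * 4 * sc_id + an, with the SA index in bits [6:0] and the sa_in_use
	 * flag in bit 7.
	 */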
	val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);

	reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
	mcs_reg_write(mcs, reg, val);
}

int mcs_set_force_clk_en(struct mcs *mcs, bool set)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(2000);
	u64 val;

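	/* MCSX_MIL_GLOBAL bit 4 is the force clock enable. When setting it,
	 * poll (up to ~2ms) until the MCS IP reports its stats as ready.
	 */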
	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);

	if (set) {
		val |= BIT_ULL(4);
		mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);

		/* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
		while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
			if (time_after(jiffies, timeout)) {
				dev_err(mcs->dev, "MCS set force clk enable failed\n");
				break;
			}
		}
	} else {
		val &= ~BIT_ULL(4);
		mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
	}

	return 0;
}

/* The TX SA interrupt is raised only if auto rekey is enabled.
 * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit toggles when
 * one of the two SAs mapped to the SC expires: tx_sa_active=0 means the
 * SA at SA_index1 expired, otherwise the SA at SA_index0 expired.
 */
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event;
	struct rsrc_bmap *sc_bmap;
	unsigned long rekey_ena;
	u64 val, sa_status;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;

	rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		/* Skip SCs for which auto rekey is not enabled */
		if (!test_bit(sc, &rekey_ena))
			continue;
		sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
		/* Skip if the tx_sa_active status has not changed */
		if (sa_status == mcs->tx_sa_active[sc])
			continue;

		/* Report the expired SA: tx_sa_active set means SA_index0,
		 * otherwise SA_index1.
		 */
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
		if (sa_status)
			event.sa_id = val & 0x7F;
		else
			event.sa_id = (val >> 7) & 0x7F;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val;
	int sc;

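	/* On a packet-number wrap (XPN equal to zero), report the SA that was
	 * in use for each provisioned TX SC; the cached tx_sa_active value
	 * selects between the two SA indices packed in SA_MAP_MEM_0.
	 */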
	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));

		if (mcs->tx_sa_active[sc])
			/* SA_index1 was used and got expired */
			event.sa_id = (val >> 7) & 0x7F;
		else
			/* SA_index0 was used and got expired */
			event.sa_id = val & 0x7F;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
				  enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	if (!(intr & MCS_BBE_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		/* Lower nibble denotes data fifo overflow interrupts and
		 * upper nibble indicates policy fifo overflow interrupts.
		 */
		if (intr & 0xFULL)
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
		else
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;

		/* Notify which lmac_id ran into the BBE fatal error */
		event.lmac_id = i & 0x3ULL;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
				  enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	if (!(intr & MCS_PAB_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		event.intr_mask = (dir == MCS_RX) ?
				  MCS_PAB_RX_CHAN_OVERFLOW_INT :
				  MCS_PAB_TX_CHAN_OVERFLOW_INT;

		/* Notify which lmac_id ran into the PAB fatal error */
		event.lmac_id = i;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}