1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ |
3 | #ifndef _IDXD_H_ |
4 | #define _IDXD_H_ |
5 | |
6 | #include <linux/sbitmap.h> |
7 | #include <linux/dmaengine.h> |
8 | #include <linux/percpu-rwsem.h> |
9 | #include <linux/wait.h> |
10 | #include <linux/cdev.h> |
11 | #include <linux/idr.h> |
12 | #include <linux/pci.h> |
13 | #include <linux/bitmap.h> |
14 | #include <linux/perf_event.h> |
15 | #include <linux/iommu.h> |
16 | #include <linux/crypto.h> |
17 | #include <uapi/linux/idxd.h> |
18 | #include "registers.h" |
19 | |
20 | #define IDXD_DRIVER_VERSION "1.00" |
21 | |
22 | extern struct kmem_cache *idxd_desc_pool; |
23 | extern bool tc_override; |
24 | |
25 | struct idxd_wq; |
26 | struct idxd_dev; |
27 | |
28 | enum idxd_dev_type { |
29 | IDXD_DEV_NONE = -1, |
30 | IDXD_DEV_DSA = 0, |
31 | IDXD_DEV_IAX, |
32 | IDXD_DEV_WQ, |
33 | IDXD_DEV_GROUP, |
34 | IDXD_DEV_ENGINE, |
35 | IDXD_DEV_CDEV, |
36 | IDXD_DEV_CDEV_FILE, |
37 | IDXD_DEV_MAX_TYPE, |
38 | }; |
39 | |
40 | struct idxd_dev { |
41 | struct device conf_dev; |
42 | enum idxd_dev_type type; |
43 | }; |
44 | |
45 | #define IDXD_REG_TIMEOUT 50 |
46 | #define IDXD_DRAIN_TIMEOUT 5000 |
47 | |
48 | enum idxd_type { |
49 | IDXD_TYPE_UNKNOWN = -1, |
50 | IDXD_TYPE_DSA = 0, |
51 | IDXD_TYPE_IAX, |
52 | IDXD_TYPE_MAX, |
53 | }; |
54 | |
55 | #define IDXD_NAME_SIZE 128 |
56 | #define IDXD_PMU_EVENT_MAX 64 |
57 | |
58 | #define IDXD_ENQCMDS_RETRIES 32 |
59 | #define IDXD_ENQCMDS_MAX_RETRIES 64 |
60 | |
61 | enum idxd_complete_type { |
62 | IDXD_COMPLETE_NORMAL = 0, |
63 | IDXD_COMPLETE_ABORT, |
64 | IDXD_COMPLETE_DEV_FAIL, |
65 | }; |
66 | |
67 | struct idxd_desc; |
68 | |
69 | struct idxd_device_driver { |
70 | const char *name; |
71 | enum idxd_dev_type *type; |
72 | int (*probe)(struct idxd_dev *idxd_dev); |
73 | void (*remove)(struct idxd_dev *idxd_dev); |
74 | void (*desc_complete)(struct idxd_desc *desc, |
75 | enum idxd_complete_type comp_type, |
76 | bool free_desc, |
77 | void *ctx, u32 *status); |
78 | struct device_driver drv; |
79 | }; |
80 | |
81 | extern struct idxd_device_driver dsa_drv; |
82 | extern struct idxd_device_driver idxd_drv; |
83 | extern struct idxd_device_driver idxd_dmaengine_drv; |
84 | extern struct idxd_device_driver idxd_user_drv; |
85 | |
86 | #define INVALID_INT_HANDLE -1 |
87 | struct idxd_irq_entry { |
88 | int id; |
89 | int vector; |
90 | struct llist_head pending_llist; |
91 | struct list_head work_list; |
92 | /* |
93 | * Lock to protect access between irq thread process descriptor |
94 | * and irq thread processing error descriptor. |
95 | */ |
96 | spinlock_t list_lock; |
97 | int int_handle; |
98 | ioasid_t pasid; |
99 | }; |
100 | |
101 | struct idxd_group { |
102 | struct idxd_dev idxd_dev; |
103 | struct idxd_device *idxd; |
104 | struct grpcfg grpcfg; |
105 | int id; |
106 | int num_engines; |
107 | int num_wqs; |
108 | bool use_rdbuf_limit; |
109 | u8 rdbufs_allowed; |
110 | u8 rdbufs_reserved; |
111 | int tc_a; |
112 | int tc_b; |
113 | int desc_progress_limit; |
114 | int batch_progress_limit; |
115 | }; |
116 | |
117 | struct idxd_pmu { |
118 | struct idxd_device *idxd; |
119 | |
120 | struct perf_event *event_list[IDXD_PMU_EVENT_MAX]; |
121 | int n_events; |
122 | |
123 | DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX); |
124 | |
125 | struct pmu pmu; |
126 | char name[IDXD_NAME_SIZE]; |
127 | int cpu; |
128 | |
129 | int n_counters; |
130 | int counter_width; |
131 | int n_event_categories; |
132 | |
133 | bool per_counter_caps_supported; |
134 | unsigned long supported_event_categories; |
135 | |
136 | unsigned long supported_filters; |
137 | int n_filters; |
138 | |
139 | struct hlist_node cpuhp_node; |
140 | }; |
141 | |
142 | #define IDXD_MAX_PRIORITY 0xf |
143 | |
144 | enum { |
145 | COUNTER_FAULTS = 0, |
146 | COUNTER_FAULT_FAILS, |
147 | COUNTER_MAX |
148 | }; |
149 | |
150 | enum idxd_wq_state { |
151 | IDXD_WQ_DISABLED = 0, |
152 | IDXD_WQ_ENABLED, |
153 | }; |
154 | |
155 | enum idxd_wq_flag { |
156 | WQ_FLAG_DEDICATED = 0, |
157 | WQ_FLAG_BLOCK_ON_FAULT, |
158 | WQ_FLAG_ATS_DISABLE, |
159 | WQ_FLAG_PRS_DISABLE, |
160 | }; |
161 | |
162 | enum idxd_wq_type { |
163 | IDXD_WQT_NONE = 0, |
164 | IDXD_WQT_KERNEL, |
165 | IDXD_WQT_USER, |
166 | }; |
167 | |
168 | struct idxd_cdev { |
169 | struct idxd_wq *wq; |
170 | struct cdev cdev; |
171 | struct idxd_dev idxd_dev; |
172 | int minor; |
173 | }; |
174 | |
175 | #define DRIVER_NAME_SIZE 128 |
176 | |
177 | #define IDXD_ALLOCATED_BATCH_SIZE 128U |
178 | #define WQ_NAME_SIZE 1024 |
179 | #define WQ_TYPE_SIZE 10 |
180 | |
181 | #define WQ_DEFAULT_QUEUE_DEPTH 16 |
182 | #define WQ_DEFAULT_MAX_XFER SZ_2M |
183 | #define WQ_DEFAULT_MAX_BATCH 32 |
184 | |
185 | enum idxd_op_type { |
186 | IDXD_OP_BLOCK = 0, |
187 | IDXD_OP_NONBLOCK = 1, |
188 | }; |
189 | |
190 | struct idxd_dma_chan { |
191 | struct dma_chan chan; |
192 | struct idxd_wq *wq; |
193 | }; |
194 | |
195 | struct idxd_wq { |
196 | void __iomem *portal; |
197 | u32 portal_offset; |
198 | unsigned int enqcmds_retries; |
199 | struct percpu_ref wq_active; |
200 | struct completion wq_dead; |
201 | struct completion wq_resurrect; |
202 | struct idxd_dev idxd_dev; |
203 | struct idxd_cdev *idxd_cdev; |
204 | struct wait_queue_head err_queue; |
205 | struct workqueue_struct *wq; |
206 | struct idxd_device *idxd; |
207 | int id; |
208 | struct idxd_irq_entry ie; |
209 | enum idxd_wq_type type; |
210 | struct idxd_group *group; |
211 | int client_count; |
212 | struct mutex wq_lock; /* mutex for workqueue */ |
213 | u32 size; |
214 | u32 threshold; |
215 | u32 priority; |
216 | enum idxd_wq_state state; |
217 | unsigned long flags; |
218 | union wqcfg *wqcfg; |
219 | unsigned long *opcap_bmap; |
220 | |
221 | struct dsa_hw_desc **hw_descs; |
222 | int num_descs; |
223 | union { |
224 | struct dsa_completion_record *compls; |
225 | struct iax_completion_record *iax_compls; |
226 | }; |
227 | dma_addr_t compls_addr; |
228 | int compls_size; |
229 | struct idxd_desc **descs; |
230 | struct sbitmap_queue sbq; |
231 | struct idxd_dma_chan *idxd_chan; |
232 | char name[WQ_NAME_SIZE + 1]; |
233 | u64 max_xfer_bytes; |
234 | u32 max_batch_size; |
235 | |
236 | /* Lock to protect upasid_xa access. */ |
237 | struct mutex uc_lock; |
238 | struct xarray upasid_xa; |
239 | |
240 | char driver_name[DRIVER_NAME_SIZE + 1]; |
241 | }; |
242 | |
243 | struct idxd_engine { |
244 | struct idxd_dev idxd_dev; |
245 | int id; |
246 | struct idxd_group *group; |
247 | struct idxd_device *idxd; |
248 | }; |
249 | |
250 | /* shadow registers */ |
251 | struct idxd_hw { |
252 | u32 version; |
253 | union gen_cap_reg gen_cap; |
254 | union wq_cap_reg wq_cap; |
255 | union group_cap_reg group_cap; |
256 | union engine_cap_reg engine_cap; |
257 | struct opcap opcap; |
258 | u32 cmd_cap; |
259 | union iaa_cap_reg iaa_cap; |
260 | }; |
261 | |
262 | enum idxd_device_state { |
263 | IDXD_DEV_HALTED = -1, |
264 | IDXD_DEV_DISABLED = 0, |
265 | IDXD_DEV_ENABLED, |
266 | }; |
267 | |
268 | enum idxd_device_flag { |
269 | IDXD_FLAG_CONFIGURABLE = 0, |
270 | IDXD_FLAG_CMD_RUNNING, |
271 | IDXD_FLAG_PASID_ENABLED, |
272 | IDXD_FLAG_USER_PASID_ENABLED, |
273 | }; |
274 | |
275 | struct idxd_dma_dev { |
276 | struct idxd_device *idxd; |
277 | struct dma_device dma; |
278 | }; |
279 | |
typedef int (*load_device_defaults_fn_t)(struct idxd_device *idxd);
281 | |
282 | struct idxd_driver_data { |
283 | const char *name_prefix; |
284 | enum idxd_type type; |
285 | const struct device_type *dev_type; |
286 | int compl_size; |
287 | int align; |
288 | int evl_cr_off; |
289 | int cr_status_off; |
290 | int cr_result_off; |
291 | load_device_defaults_fn_t load_device_defaults; |
292 | }; |
293 | |
294 | struct idxd_evl { |
295 | /* Lock to protect event log access. */ |
296 | spinlock_t lock; |
297 | void *log; |
298 | dma_addr_t dma; |
299 | /* Total size of event log = number of entries * entry size. */ |
300 | unsigned int log_size; |
301 | /* The number of entries in the event log. */ |
302 | u16 size; |
303 | unsigned long *bmap; |
304 | bool batch_fail[IDXD_MAX_BATCH_IDENT]; |
305 | }; |
306 | |
307 | struct idxd_evl_fault { |
308 | struct work_struct work; |
309 | struct idxd_wq *wq; |
310 | u8 status; |
311 | |
	/* Must always be the last member. */
313 | struct __evl_entry entry[]; |
314 | }; |
315 | |
316 | struct idxd_device { |
317 | struct idxd_dev idxd_dev; |
318 | struct idxd_driver_data *data; |
319 | struct list_head list; |
320 | struct idxd_hw hw; |
321 | enum idxd_device_state state; |
322 | unsigned long flags; |
323 | int id; |
324 | int major; |
325 | u32 cmd_status; |
326 | struct idxd_irq_entry ie; /* misc irq, msix 0 */ |
327 | |
328 | struct pci_dev *pdev; |
329 | void __iomem *reg_base; |
330 | |
331 | spinlock_t dev_lock; /* spinlock for device */ |
332 | spinlock_t cmd_lock; /* spinlock for device commands */ |
333 | struct completion *cmd_done; |
334 | struct idxd_group **groups; |
335 | struct idxd_wq **wqs; |
336 | struct idxd_engine **engines; |
337 | |
338 | struct iommu_sva *sva; |
339 | unsigned int pasid; |
340 | |
341 | int num_groups; |
342 | int irq_cnt; |
343 | bool request_int_handles; |
344 | |
345 | u32 msix_perm_offset; |
346 | u32 wqcfg_offset; |
347 | u32 grpcfg_offset; |
348 | u32 perfmon_offset; |
349 | |
350 | u64 max_xfer_bytes; |
351 | u32 max_batch_size; |
352 | int max_groups; |
353 | int max_engines; |
354 | int max_rdbufs; |
355 | int max_wqs; |
356 | int max_wq_size; |
357 | int rdbuf_limit; |
358 | int nr_rdbufs; /* non-reserved read buffers */ |
359 | unsigned int wqcfg_size; |
360 | unsigned long *wq_enable_map; |
361 | |
362 | union sw_err_reg sw_err; |
363 | wait_queue_head_t cmd_waitq; |
364 | |
365 | struct idxd_dma_dev *idxd_dma; |
366 | struct workqueue_struct *wq; |
367 | struct work_struct work; |
368 | |
369 | struct idxd_pmu *idxd_pmu; |
370 | |
371 | unsigned long *opcap_bmap; |
372 | struct idxd_evl *evl; |
373 | struct kmem_cache *evl_cache; |
374 | |
375 | struct dentry *dbgfs_dir; |
376 | struct dentry *dbgfs_evl_file; |
377 | }; |
378 | |
379 | static inline unsigned int evl_ent_size(struct idxd_device *idxd) |
380 | { |
381 | return idxd->hw.gen_cap.evl_support ? |
382 | (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0; |
383 | } |
384 | |
385 | static inline unsigned int evl_size(struct idxd_device *idxd) |
386 | { |
387 | return idxd->evl->size * evl_ent_size(idxd); |
388 | } |
389 | |
390 | struct crypto_ctx { |
391 | struct acomp_req *req; |
392 | struct crypto_tfm *tfm; |
393 | dma_addr_t src_addr; |
394 | dma_addr_t dst_addr; |
395 | bool compress; |
396 | }; |
397 | |
398 | /* IDXD software descriptor */ |
399 | struct idxd_desc { |
400 | union { |
401 | struct dsa_hw_desc *hw; |
402 | struct iax_hw_desc *iax_hw; |
403 | }; |
404 | dma_addr_t desc_dma; |
405 | union { |
406 | struct dsa_completion_record *completion; |
407 | struct iax_completion_record *iax_completion; |
408 | }; |
409 | dma_addr_t compl_dma; |
410 | union { |
411 | struct dma_async_tx_descriptor txd; |
412 | struct crypto_ctx crypto; |
413 | }; |
414 | struct llist_node llnode; |
415 | struct list_head list; |
416 | int id; |
417 | int cpu; |
418 | struct idxd_wq *wq; |
419 | }; |
420 | |
421 | /* |
422 | * This is software defined error for the completion status. We overload the error code |
423 | * that will never appear in completion status and only SWERR register. |
424 | */ |
425 | enum idxd_completion_status { |
426 | IDXD_COMP_DESC_ABORT = 0xff, |
427 | }; |
428 | |
#define idxd_confdev(idxd) (&(idxd)->idxd_dev.conf_dev)
#define wq_confdev(wq) (&(wq)->idxd_dev.conf_dev)
#define engine_confdev(engine) (&(engine)->idxd_dev.conf_dev)
#define group_confdev(group) (&(group)->idxd_dev.conf_dev)
#define cdev_dev(cdev) (&(cdev)->idxd_dev.conf_dev)
434 | #define user_ctx_dev(ctx) (&(ctx)->idxd_dev.conf_dev) |
435 | |
436 | #define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev) |
437 | #define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev) |
438 | #define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev) |
439 | |
440 | static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq) |
441 | { |
442 | struct device *dev = wq_confdev(wq); |
443 | struct idxd_device_driver *idxd_drv = |
444 | container_of(dev->driver, struct idxd_device_driver, drv); |
445 | |
446 | return idxd_drv; |
447 | } |
448 | |
449 | static inline struct idxd_device *confdev_to_idxd(struct device *dev) |
450 | { |
451 | struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); |
452 | |
453 | return idxd_dev_to_idxd(idxd_dev); |
454 | } |
455 | |
456 | static inline struct idxd_wq *confdev_to_wq(struct device *dev) |
457 | { |
458 | struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); |
459 | |
460 | return idxd_dev_to_wq(idxd_dev); |
461 | } |
462 | |
463 | static inline struct idxd_engine *confdev_to_engine(struct device *dev) |
464 | { |
465 | struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); |
466 | |
467 | return container_of(idxd_dev, struct idxd_engine, idxd_dev); |
468 | } |
469 | |
470 | static inline struct idxd_group *confdev_to_group(struct device *dev) |
471 | { |
472 | struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); |
473 | |
474 | return container_of(idxd_dev, struct idxd_group, idxd_dev); |
475 | } |
476 | |
477 | static inline struct idxd_cdev *dev_to_cdev(struct device *dev) |
478 | { |
479 | struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); |
480 | |
481 | return container_of(idxd_dev, struct idxd_cdev, idxd_dev); |
482 | } |
483 | |
484 | static inline void idxd_dev_set_type(struct idxd_dev *idev, int type) |
485 | { |
486 | if (type >= IDXD_DEV_MAX_TYPE) { |
487 | idev->type = IDXD_DEV_NONE; |
488 | return; |
489 | } |
490 | |
491 | idev->type = type; |
492 | } |
493 | |
494 | static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx) |
495 | { |
496 | return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie; |
497 | } |
498 | |
499 | static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie) |
500 | { |
501 | return container_of(ie, struct idxd_wq, ie); |
502 | } |
503 | |
504 | static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie) |
505 | { |
506 | return container_of(ie, struct idxd_device, ie); |
507 | } |
508 | |
509 | static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable) |
510 | { |
511 | union gencfg_reg reg; |
512 | |
513 | reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); |
514 | reg.user_int_en = enable; |
515 | iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); |
516 | } |
517 | |
518 | extern const struct bus_type dsa_bus_type; |
519 | |
520 | extern bool support_enqcmd; |
521 | extern struct ida idxd_ida; |
522 | extern const struct device_type dsa_device_type; |
523 | extern const struct device_type iax_device_type; |
524 | extern const struct device_type idxd_wq_device_type; |
525 | extern const struct device_type idxd_engine_device_type; |
526 | extern const struct device_type idxd_group_device_type; |
527 | |
528 | static inline bool is_dsa_dev(struct idxd_dev *idxd_dev) |
529 | { |
530 | return idxd_dev->type == IDXD_DEV_DSA; |
531 | } |
532 | |
533 | static inline bool is_iax_dev(struct idxd_dev *idxd_dev) |
534 | { |
535 | return idxd_dev->type == IDXD_DEV_IAX; |
536 | } |
537 | |
538 | static inline bool is_idxd_dev(struct idxd_dev *idxd_dev) |
539 | { |
540 | return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev); |
541 | } |
542 | |
543 | static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev) |
544 | { |
545 | return idxd_dev->type == IDXD_DEV_WQ; |
546 | } |
547 | |
static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0;
}
554 | |
555 | static inline bool is_idxd_wq_user(struct idxd_wq *wq) |
556 | { |
557 | return wq->type == IDXD_WQT_USER; |
558 | } |
559 | |
560 | static inline bool is_idxd_wq_kernel(struct idxd_wq *wq) |
561 | { |
562 | return wq->type == IDXD_WQT_KERNEL; |
563 | } |
564 | |
565 | static inline bool wq_dedicated(struct idxd_wq *wq) |
566 | { |
567 | return test_bit(WQ_FLAG_DEDICATED, &wq->flags); |
568 | } |
569 | |
570 | static inline bool wq_shared(struct idxd_wq *wq) |
571 | { |
572 | return !test_bit(WQ_FLAG_DEDICATED, &wq->flags); |
573 | } |
574 | |
575 | static inline bool device_pasid_enabled(struct idxd_device *idxd) |
576 | { |
577 | return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); |
578 | } |
579 | |
580 | static inline bool device_user_pasid_enabled(struct idxd_device *idxd) |
581 | { |
582 | return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); |
583 | } |
584 | |
585 | static inline bool wq_pasid_enabled(struct idxd_wq *wq) |
586 | { |
	return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
	       (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
589 | } |
590 | |
591 | static inline bool wq_shared_supported(struct idxd_wq *wq) |
592 | { |
593 | return (support_enqcmd && wq_pasid_enabled(wq)); |
594 | } |
595 | |
596 | enum idxd_portal_prot { |
597 | IDXD_PORTAL_UNLIMITED = 0, |
598 | IDXD_PORTAL_LIMITED, |
599 | }; |
600 | |
601 | enum idxd_interrupt_type { |
602 | IDXD_IRQ_MSIX = 0, |
603 | IDXD_IRQ_IMS, |
604 | }; |
605 | |
606 | static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot) |
607 | { |
608 | return prot * 0x1000; |
609 | } |
610 | |
611 | static inline int idxd_get_wq_portal_full_offset(int wq_id, |
612 | enum idxd_portal_prot prot) |
613 | { |
614 | return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot); |
615 | } |
616 | |
617 | #define IDXD_PORTAL_MASK (PAGE_SIZE - 1) |
618 | |
619 | /* |
620 | * Even though this function can be accessed by multiple threads, it is safe to use. |
621 | * At worst the address gets used more than once before it gets incremented. We don't |
622 | * hit a threshold until iops becomes many million times a second. So the occasional |
623 | * reuse of the same address is tolerable compare to using an atomic variable. This is |
624 | * safe on a system that has atomic load/store for 32bit integers. Given that this is an |
625 | * Intel iEP device, that should not be a problem. |
626 | */ |
627 | static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq) |
628 | { |
629 | int ofs = wq->portal_offset; |
630 | |
631 | wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK; |
632 | return wq->portal + ofs; |
633 | } |
634 | |
635 | static inline void idxd_wq_get(struct idxd_wq *wq) |
636 | { |
637 | wq->client_count++; |
638 | } |
639 | |
640 | static inline void idxd_wq_put(struct idxd_wq *wq) |
641 | { |
642 | wq->client_count--; |
643 | } |
644 | |
645 | static inline int idxd_wq_refcount(struct idxd_wq *wq) |
646 | { |
647 | return wq->client_count; |
648 | }; |
649 | |
650 | static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private) |
651 | { |
	dev_set_drvdata(wq_confdev(wq), private);
653 | } |
654 | |
655 | static inline void *idxd_wq_get_private(struct idxd_wq *wq) |
656 | { |
657 | return dev_get_drvdata(wq_confdev(wq)); |
658 | } |
659 | |
660 | /* |
661 | * Intel IAA does not support batch processing. |
662 | * The max batch size of device, max batch size of wq and |
663 | * max batch shift of wqcfg should be always 0 on IAA. |
664 | */ |
665 | static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd, |
666 | u32 max_batch_size) |
667 | { |
668 | if (idxd_type == IDXD_TYPE_IAX) |
669 | idxd->max_batch_size = 0; |
670 | else |
671 | idxd->max_batch_size = max_batch_size; |
672 | } |
673 | |
674 | static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq, |
675 | u32 max_batch_size) |
676 | { |
677 | if (idxd_type == IDXD_TYPE_IAX) |
678 | wq->max_batch_size = 0; |
679 | else |
680 | wq->max_batch_size = max_batch_size; |
681 | } |
682 | |
683 | static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg, |
684 | u32 max_batch_shift) |
685 | { |
686 | if (idxd_type == IDXD_TYPE_IAX) |
687 | wqcfg->max_batch_shift = 0; |
688 | else |
689 | wqcfg->max_batch_shift = max_batch_shift; |
690 | } |
691 | |
692 | static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev) |
693 | { |
694 | return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0); |
695 | } |
696 | |
697 | #define MODULE_ALIAS_IDXD_DEVICE(type) MODULE_ALIAS("idxd:t" __stringify(type) "*") |
698 | #define IDXD_DEVICES_MODALIAS_FMT "idxd:t%d" |
699 | |
700 | int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv, |
701 | struct module *module, const char *mod_name); |
702 | #define idxd_driver_register(driver) \ |
703 | __idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) |
704 | |
705 | void idxd_driver_unregister(struct idxd_device_driver *idxd_drv); |
706 | |
707 | #define module_idxd_driver(__idxd_driver) \ |
708 | module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister) |
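
/*
 * Example (editorial sketch): the shape of an idxd sub-driver built on the
 * registration helpers above. The names my_types, my_driver, my_probe and
 * my_remove are hypothetical.
 *
 *	static enum idxd_dev_type my_types[] = { IDXD_DEV_WQ, IDXD_DEV_NONE };
 *
 *	static struct idxd_device_driver my_driver = {
 *		.name	= "my_driver",
 *		.type	= my_types,
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *	};
 *	module_idxd_driver(my_driver);
 */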
709 | |
710 | void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); |
711 | void idxd_dma_complete_txd(struct idxd_desc *desc, |
712 | enum idxd_complete_type comp_type, |
713 | bool free_desc, void *ctx, u32 *status); |
714 | |
715 | static inline void idxd_desc_complete(struct idxd_desc *desc, |
716 | enum idxd_complete_type comp_type, |
717 | bool free_desc) |
718 | { |
719 | struct idxd_device_driver *drv; |
720 | u32 status; |
721 | |
	drv = wq_to_idxd_drv(desc->wq);
723 | if (drv->desc_complete) |
724 | drv->desc_complete(desc, comp_type, free_desc, |
725 | &desc->txd, &status); |
726 | } |
727 | |
728 | int idxd_register_bus_type(void); |
729 | void idxd_unregister_bus_type(void); |
730 | int idxd_register_devices(struct idxd_device *idxd); |
731 | void idxd_unregister_devices(struct idxd_device *idxd); |
732 | void idxd_wqs_quiesce(struct idxd_device *idxd); |
733 | bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc); |
734 | void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count); |
735 | int idxd_load_iaa_device_defaults(struct idxd_device *idxd); |
736 | |
737 | /* device interrupt control */ |
738 | irqreturn_t idxd_misc_thread(int vec, void *data); |
739 | irqreturn_t idxd_wq_thread(int irq, void *data); |
740 | void idxd_mask_error_interrupts(struct idxd_device *idxd); |
741 | void idxd_unmask_error_interrupts(struct idxd_device *idxd); |
742 | |
743 | /* device control */ |
744 | int idxd_device_drv_probe(struct idxd_dev *idxd_dev); |
745 | void idxd_device_drv_remove(struct idxd_dev *idxd_dev); |
746 | int idxd_drv_enable_wq(struct idxd_wq *wq); |
747 | void idxd_drv_disable_wq(struct idxd_wq *wq); |
748 | int idxd_device_init_reset(struct idxd_device *idxd); |
749 | int idxd_device_enable(struct idxd_device *idxd); |
750 | int idxd_device_disable(struct idxd_device *idxd); |
751 | void idxd_device_reset(struct idxd_device *idxd); |
752 | void idxd_device_clear_state(struct idxd_device *idxd); |
753 | int idxd_device_config(struct idxd_device *idxd); |
754 | void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid); |
755 | int idxd_device_load_config(struct idxd_device *idxd); |
756 | int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle, |
757 | enum idxd_interrupt_type irq_type); |
758 | int idxd_device_release_int_handle(struct idxd_device *idxd, int handle, |
759 | enum idxd_interrupt_type irq_type); |
760 | |
761 | /* work queue control */ |
762 | void idxd_wqs_unmap_portal(struct idxd_device *idxd); |
763 | int idxd_wq_alloc_resources(struct idxd_wq *wq); |
764 | void idxd_wq_free_resources(struct idxd_wq *wq); |
765 | int idxd_wq_enable(struct idxd_wq *wq); |
766 | int idxd_wq_disable(struct idxd_wq *wq, bool reset_config); |
767 | void idxd_wq_drain(struct idxd_wq *wq); |
768 | void idxd_wq_reset(struct idxd_wq *wq); |
769 | int idxd_wq_map_portal(struct idxd_wq *wq); |
770 | void idxd_wq_unmap_portal(struct idxd_wq *wq); |
771 | int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid); |
772 | int idxd_wq_disable_pasid(struct idxd_wq *wq); |
773 | void __idxd_wq_quiesce(struct idxd_wq *wq); |
774 | void idxd_wq_quiesce(struct idxd_wq *wq); |
775 | int idxd_wq_init_percpu_ref(struct idxd_wq *wq); |
776 | void idxd_wq_free_irq(struct idxd_wq *wq); |
777 | int idxd_wq_request_irq(struct idxd_wq *wq); |
778 | |
779 | /* submission */ |
780 | int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); |
781 | struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype); |
782 | int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc); |
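
/*
 * Example (editorial sketch): the typical kernel-client submit flow using
 * only the helpers declared above; fill_hw_desc() is hypothetical and error
 * handling is abbreviated.
 *
 *	struct idxd_desc *desc;
 *	int rc;
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	fill_hw_desc(desc->hw);
 *	rc = idxd_submit_desc(wq, desc);
 *	if (rc)
 *		idxd_free_desc(wq, desc);
 */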
783 | |
784 | /* dmaengine */ |
785 | int idxd_register_dma_device(struct idxd_device *idxd); |
786 | void idxd_unregister_dma_device(struct idxd_device *idxd); |
787 | |
788 | /* cdev */ |
789 | int idxd_cdev_register(void); |
790 | void idxd_cdev_remove(void); |
791 | int idxd_cdev_get_major(struct idxd_device *idxd); |
792 | int idxd_wq_add_cdev(struct idxd_wq *wq); |
793 | void idxd_wq_del_cdev(struct idxd_wq *wq); |
794 | int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr, |
795 | void *buf, int len); |
796 | void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index); |
797 | |
798 | /* perfmon */ |
799 | #if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON) |
800 | int perfmon_pmu_init(struct idxd_device *idxd); |
801 | void perfmon_pmu_remove(struct idxd_device *idxd); |
802 | void perfmon_counter_overflow(struct idxd_device *idxd); |
803 | void perfmon_init(void); |
804 | void perfmon_exit(void); |
805 | #else |
806 | static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; } |
807 | static inline void perfmon_pmu_remove(struct idxd_device *idxd) {} |
808 | static inline void perfmon_counter_overflow(struct idxd_device *idxd) {} |
809 | static inline void perfmon_init(void) {} |
810 | static inline void perfmon_exit(void) {} |
811 | #endif |
812 | |
813 | /* debugfs */ |
814 | int idxd_device_init_debugfs(struct idxd_device *idxd); |
815 | void idxd_device_remove_debugfs(struct idxd_device *idxd); |
816 | int idxd_init_debugfs(void); |
817 | void idxd_remove_debugfs(void); |
818 | |
819 | #endif |
820 | |