1 | /* |
2 | * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
21 | * SOFTWARE. |
22 | * |
23 | * Authors: |
24 | * Kevin Tian <kevin.tian@intel.com> |
25 | * Eddie Dong <eddie.dong@intel.com> |
26 | * |
27 | * Contributors: |
28 | * Niu Bing <bing.niu@intel.com> |
29 | * Zhi Wang <zhi.a.wang@intel.com> |
30 | * |
31 | */ |
32 | |
33 | #ifndef _GVT_H_ |
34 | #define _GVT_H_ |
35 | |
36 | #include <uapi/linux/pci_regs.h> |
37 | #include <linux/vfio.h> |
38 | #include <linux/mdev.h> |
39 | |
40 | #include <asm/kvm_page_track.h> |
41 | |
42 | #include "gt/intel_gt.h" |
43 | #include "intel_gvt.h" |
44 | |
45 | #include "debug.h" |
46 | #include "mmio.h" |
47 | #include "reg.h" |
48 | #include "interrupt.h" |
49 | #include "gtt.h" |
50 | #include "display.h" |
51 | #include "edid.h" |
52 | #include "execlist.h" |
53 | #include "scheduler.h" |
54 | #include "sched_policy.h" |
55 | #include "mmio_context.h" |
56 | #include "cmd_parser.h" |
57 | #include "fb_decoder.h" |
58 | #include "dmabuf.h" |
59 | #include "page_track.h" |
60 | |
61 | #define GVT_MAX_VGPU 8 |
62 | |
63 | struct engine_mmio; |
64 | |
65 | /* Describe per-platform limitations. */ |
66 | struct intel_gvt_device_info { |
67 | u32 max_support_vgpus; |
68 | u32 cfg_space_size; |
69 | u32 mmio_size; |
70 | u32 mmio_bar; |
71 | unsigned long msi_cap_offset; |
72 | u32 gtt_start_offset; |
73 | u32 gtt_entry_size; |
74 | u32 gtt_entry_size_shift; |
75 | int gmadr_bytes_in_cmd; |
76 | u32 max_surface_size; |
77 | }; |
78 | |
79 | /* GM resources owned by a vGPU */ |
80 | struct intel_vgpu_gm { |
81 | u64 aperture_sz; |
82 | u64 hidden_sz; |
83 | struct drm_mm_node low_gm_node; |
84 | struct drm_mm_node high_gm_node; |
85 | }; |
86 | |
87 | #define INTEL_GVT_MAX_NUM_FENCES 32 |
88 | |
89 | /* Fences owned by a vGPU */ |
90 | struct intel_vgpu_fence { |
91 | struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES]; |
92 | u32 size; |
93 | }; |
94 | |
95 | struct intel_vgpu_mmio { |
96 | void *vreg; |
97 | }; |
98 | |
99 | #define INTEL_GVT_MAX_BAR_NUM 4 |
100 | |
101 | struct intel_vgpu_pci_bar { |
102 | u64 size; |
103 | bool tracked; |
104 | }; |
105 | |
106 | struct intel_vgpu_cfg_space { |
107 | unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE]; |
108 | struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM]; |
109 | u32 pmcsr_off; |
110 | }; |
111 | |
112 | #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space) |
113 | |
114 | struct intel_vgpu_irq { |
115 | bool irq_warn_once[INTEL_GVT_EVENT_MAX]; |
116 | DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES], |
117 | INTEL_GVT_EVENT_MAX); |
118 | }; |
119 | |
120 | struct intel_vgpu_opregion { |
121 | void *va; |
122 | u32 gfn[INTEL_GVT_OPREGION_PAGES]; |
123 | }; |
124 | |
125 | #define vgpu_opregion(vgpu) (&(vgpu->opregion)) |
126 | |
127 | struct intel_vgpu_display { |
128 | struct intel_vgpu_i2c_edid i2c_edid; |
129 | struct intel_vgpu_port ports[I915_MAX_PORTS]; |
130 | struct intel_vgpu_sbi sbi; |
131 | enum port port_num; |
132 | }; |
133 | |
134 | struct vgpu_sched_ctl { |
135 | int weight; |
136 | }; |
137 | |
138 | enum { |
139 | INTEL_VGPU_EXECLIST_SUBMISSION = 1, |
140 | INTEL_VGPU_GUC_SUBMISSION, |
141 | }; |
142 | |
143 | struct intel_vgpu_submission_ops { |
144 | const char *name; |
145 | int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); |
146 | void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); |
147 | void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); |
148 | }; |
149 | |
150 | struct intel_vgpu_submission { |
151 | struct intel_vgpu_execlist execlist[I915_NUM_ENGINES]; |
152 | struct list_head workload_q_head[I915_NUM_ENGINES]; |
153 | struct intel_context *shadow[I915_NUM_ENGINES]; |
154 | struct kmem_cache *workloads; |
155 | atomic_t running_workload_num; |
156 | union { |
157 | u64 i915_context_pml4; |
158 | u64 i915_context_pdps[GEN8_3LVL_PDPES]; |
159 | }; |
160 | DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); |
161 | DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); |
162 | void *ring_scan_buffer[I915_NUM_ENGINES]; |
163 | int ring_scan_buffer_size[I915_NUM_ENGINES]; |
164 | const struct intel_vgpu_submission_ops *ops; |
165 | int virtual_submission_interface; |
166 | bool active; |
167 | struct { |
168 | u32 lrca; |
169 | bool valid; |
170 | u64 ring_context_gpa; |
171 | } last_ctx[I915_NUM_ENGINES]; |
172 | }; |
173 | |
174 | #define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries" |
175 | |
176 | enum { |
177 | INTEL_VGPU_STATUS_ATTACHED = 0, |
178 | INTEL_VGPU_STATUS_ACTIVE, |
179 | INTEL_VGPU_STATUS_NR_BITS, |
180 | }; |
181 | |
182 | struct intel_vgpu { |
183 | struct vfio_device vfio_device; |
184 | struct intel_gvt *gvt; |
185 | struct mutex vgpu_lock; |
186 | int id; |
187 | DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS); |
188 | bool pv_notified; |
189 | bool failsafe; |
190 | unsigned int resetting_eng; |
191 | |
	/* Both sched_data and sched_ctl can be seen as part of the global
	 * GVT scheduler structure, so the two fields below are protected
	 * by sched_lock rather than vgpu_lock.
	 */
196 | void *sched_data; |
197 | struct vgpu_sched_ctl sched_ctl; |
198 | |
199 | struct intel_vgpu_fence fence; |
200 | struct intel_vgpu_gm gm; |
201 | struct intel_vgpu_cfg_space cfg_space; |
202 | struct intel_vgpu_mmio mmio; |
203 | struct intel_vgpu_irq irq; |
204 | struct intel_vgpu_gtt gtt; |
205 | struct intel_vgpu_opregion opregion; |
206 | struct intel_vgpu_display display; |
207 | struct intel_vgpu_submission submission; |
208 | struct radix_tree_root page_track_tree; |
209 | u32 hws_pga[I915_NUM_ENGINES]; |
	/* Set on PCI_D3, cleared on DMLR; does not reflect the actual PM state */
211 | bool d3_entered; |
212 | |
213 | struct dentry *debugfs; |
214 | |
215 | struct list_head dmabuf_obj_list_head; |
216 | struct mutex dmabuf_lock; |
217 | struct idr object_idr; |
218 | struct intel_vgpu_vblank_timer vblank_timer; |
219 | |
220 | u32 scan_nonprivbb; |
221 | |
222 | struct vfio_region *region; |
223 | int num_regions; |
224 | struct eventfd_ctx *msi_trigger; |
225 | |
226 | /* |
227 | * Two caches are used to avoid mapping duplicated pages (eg. |
228 | * scratch pages). This help to reduce dma setup overhead. |
229 | */ |
230 | struct rb_root gfn_cache; |
231 | struct rb_root dma_addr_cache; |
232 | unsigned long nr_cache_entries; |
233 | struct mutex cache_lock; |
234 | |
235 | struct kvm_page_track_notifier_node track_node; |
236 | #define NR_BKT (1 << 18) |
237 | struct hlist_head ptable[NR_BKT]; |
238 | #undef NR_BKT |
239 | }; |
240 | |
/* Validate VM health status */
242 | #define vgpu_is_vm_unhealthy(ret_val) \ |
243 | (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT)) |
244 | |
245 | struct intel_gvt_gm { |
246 | unsigned long vgpu_allocated_low_gm_size; |
247 | unsigned long vgpu_allocated_high_gm_size; |
248 | }; |
249 | |
250 | struct intel_gvt_fence { |
251 | unsigned long vgpu_allocated_fence_num; |
252 | }; |
253 | |
254 | /* Special MMIO blocks. */ |
255 | struct gvt_mmio_block { |
256 | i915_reg_t offset; |
257 | unsigned int size; |
258 | gvt_mmio_func read; |
259 | gvt_mmio_func write; |
260 | }; |
261 | |
262 | #define INTEL_GVT_MMIO_HASH_BITS 11 |
263 | |
264 | struct intel_gvt_mmio { |
265 | u16 *mmio_attribute; |
266 | /* Register contains RO bits */ |
267 | #define F_RO (1 << 0) |
268 | /* Register contains graphics address */ |
269 | #define F_GMADR (1 << 1) |
270 | /* Mode mask registers with high 16 bits as the mask bits */ |
271 | #define F_MODE_MASK (1 << 2) |
272 | /* This reg can be accessed by GPU commands */ |
273 | #define F_CMD_ACCESS (1 << 3) |
274 | /* This reg has been accessed by a VM */ |
275 | #define F_ACCESSED (1 << 4) |
276 | /* This reg requires save & restore during host PM suspend/resume */ |
277 | #define F_PM_SAVE (1 << 5) |
/* This reg can be accessed with an unaligned address */
279 | #define F_UNALIGN (1 << 6) |
/* This reg is in GVT's mmio save-restore list and in the hardware
 * logical context image
 */
283 | #define F_SR_IN_CTX (1 << 7) |
/* The value written to this reg by a GPU command needs to be patched */
285 | #define F_CMD_WRITE_PATCH (1 << 8) |
286 | |
287 | struct gvt_mmio_block *mmio_block; |
288 | unsigned int num_mmio_block; |
289 | |
290 | DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); |
291 | unsigned long num_tracked_mmio; |
292 | }; |
293 | |
294 | struct intel_gvt_firmware { |
295 | void *cfg_space; |
296 | void *mmio; |
297 | bool firmware_loaded; |
298 | }; |
299 | |
300 | struct intel_vgpu_config { |
301 | unsigned int low_mm; |
302 | unsigned int high_mm; |
303 | unsigned int fence; |
304 | |
305 | /* |
306 | * A vGPU with a weight of 8 will get twice as much GPU as a vGPU with |
307 | * a weight of 4 on a contended host, different vGPU type has different |
308 | * weight set. Legal weights range from 1 to 16. |
309 | */ |
310 | unsigned int weight; |
311 | enum intel_vgpu_edid edid; |
312 | const char *name; |
313 | }; |
314 | |
315 | struct intel_vgpu_type { |
316 | struct mdev_type type; |
317 | char name[16]; |
318 | const struct intel_vgpu_config *conf; |
319 | }; |
320 | |
321 | struct intel_gvt { |
	/* GVT scope lock, protects GVT itself and all resources not yet
	 * covered by the more specific locks (vgpu and scheduler lock).
	 */
325 | struct mutex lock; |
	/* scheduler scope lock, protects gvt and vgpu scheduling-related data */
327 | struct mutex sched_lock; |
328 | |
329 | struct intel_gt *gt; |
330 | struct idr vgpu_idr; /* vGPU IDR pool */ |
331 | |
332 | struct intel_gvt_device_info device_info; |
333 | struct intel_gvt_gm gm; |
334 | struct intel_gvt_fence fence; |
335 | struct intel_gvt_mmio mmio; |
336 | struct intel_gvt_firmware firmware; |
337 | struct intel_gvt_irq irq; |
338 | struct intel_gvt_gtt gtt; |
339 | struct intel_gvt_workload_scheduler scheduler; |
340 | struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES]; |
341 | DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); |
342 | struct mdev_parent parent; |
343 | struct mdev_type **mdev_types; |
344 | struct intel_vgpu_type *types; |
345 | unsigned int num_types; |
346 | struct intel_vgpu *idle_vgpu; |
347 | |
348 | struct task_struct *service_thread; |
349 | wait_queue_head_t service_thread_wq; |
350 | |
	/* service_request is only manipulated with atomic bit ops, so there
	 * is no need to take the big GVT lock to protect it.
	 */
354 | unsigned long service_request; |
355 | |
356 | struct { |
357 | struct engine_mmio *mmio; |
358 | int ctx_mmio_count[I915_NUM_ENGINES]; |
359 | u32 *tlb_mmio_offset_list; |
360 | u32 tlb_mmio_offset_list_cnt; |
361 | u32 *mocs_mmio_offset_list; |
362 | u32 mocs_mmio_offset_list_cnt; |
363 | } engine_mmio_list; |
364 | bool is_reg_whitelist_updated; |
365 | |
366 | struct dentry *debugfs_root; |
367 | }; |
368 | |
369 | enum { |
	/* Scheduling triggered by a timer */
371 | INTEL_GVT_REQUEST_SCHED = 0, |
372 | |
	/* Scheduling triggered by an event */
374 | INTEL_GVT_REQUEST_EVENT_SCHED = 1, |
375 | |
376 | /* per-vGPU vblank emulation request */ |
377 | INTEL_GVT_REQUEST_EMULATE_VBLANK = 2, |
378 | INTEL_GVT_REQUEST_EMULATE_VBLANK_MAX = INTEL_GVT_REQUEST_EMULATE_VBLANK |
379 | + GVT_MAX_VGPU, |
380 | }; |
381 | |
382 | static inline void intel_gvt_request_service(struct intel_gvt *gvt, |
383 | int service) |
384 | { |
	set_bit(service, (void *)&gvt->service_request);
386 | wake_up(&gvt->service_thread_wq); |
387 | } |
388 | |
389 | void intel_gvt_free_firmware(struct intel_gvt *gvt); |
390 | int intel_gvt_load_firmware(struct intel_gvt *gvt); |
391 | |
392 | /* Aperture/GM space definitions for GVT device */ |
393 | #define MB_TO_BYTES(mb) ((mb) << 20ULL) |
394 | #define BYTES_TO_MB(b) ((b) >> 20ULL) |
395 | |
396 | #define HOST_LOW_GM_SIZE MB_TO_BYTES(128) |
397 | #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) |
398 | #define HOST_FENCE 4 |
399 | |
400 | #define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt) |
401 | |
402 | /* Aperture/GM space definitions for GVT device */ |
403 | #define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end |
404 | #define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start |
405 | |
406 | #define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total |
407 | #define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3) |
408 | #define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt)) |
409 | |
410 | #define gvt_aperture_gmadr_base(gvt) (0) |
411 | #define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \ |
412 | + gvt_aperture_sz(gvt) - 1) |
413 | |
414 | #define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \ |
415 | + gvt_aperture_sz(gvt)) |
416 | #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \ |
417 | + gvt_hidden_sz(gvt) - 1) |
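
/*
 * Illustrative layout of the host GM address space as described by the
 * macros above: the aperture (mappable) range starts at gmadr 0 and the
 * hidden (non-mappable) range follows immediately after it:
 *
 *	0 ............. aperture_sz - 1 | aperture_sz .......... gm_sz - 1
 *	|<---------- aperture --------->|<----------- hidden ----------->|
 */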
418 | |
419 | #define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences) |
420 | |
421 | /* Aperture/GM space definitions for vGPU */ |
422 | #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start) |
423 | #define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start) |
424 | #define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz) |
425 | #define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz) |
426 | |
427 | #define vgpu_aperture_pa_base(vgpu) \ |
428 | (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu)) |
429 | |
430 | #define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz) |
431 | |
432 | #define vgpu_aperture_pa_end(vgpu) \ |
433 | (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1) |
434 | |
435 | #define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu)) |
436 | #define vgpu_aperture_gmadr_end(vgpu) \ |
437 | (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1) |
438 | |
439 | #define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu)) |
440 | #define vgpu_hidden_gmadr_end(vgpu) \ |
441 | (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1) |
442 | |
443 | #define vgpu_fence_sz(vgpu) (vgpu->fence.size) |
444 | |
/* ring context size, i.e. the first 0x50 dwords */
446 | #define RING_CTX_SIZE 320 |
447 | |
448 | int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, |
449 | const struct intel_vgpu_config *conf); |
450 | void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); |
451 | void intel_vgpu_free_resource(struct intel_vgpu *vgpu); |
452 | void intel_vgpu_write_fence(struct intel_vgpu *vgpu, |
453 | u32 fence, u64 value); |
454 | |
/* Macros for easily accessing vGPU virtual/shadow registers.
   Explicitly separate uses for typed MMIO reg or raw offset. */
457 | #define vgpu_vreg_t(vgpu, reg) \ |
458 | (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg))) |
459 | #define vgpu_vreg(vgpu, offset) \ |
460 | (*(u32 *)(vgpu->mmio.vreg + (offset))) |
461 | #define vgpu_vreg64_t(vgpu, reg) \ |
462 | (*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg))) |
463 | #define vgpu_vreg64(vgpu, offset) \ |
464 | (*(u64 *)(vgpu->mmio.vreg + (offset))) |
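
/*
 * Example (illustrative): the typed form takes an i915_reg_t, the plain form
 * takes a raw MMIO offset ("reg" and 0x2030 below are placeholders):
 *
 *	u32 v = vgpu_vreg_t(vgpu, reg);
 *	vgpu_vreg(vgpu, 0x2030) = 0;
 */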
465 | |
466 | #define for_each_active_vgpu(gvt, vgpu, id) \ |
467 | idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \ |
468 | for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status)) |
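
/*
 * Example (illustrative; "handle_vgpu" is a hypothetical helper): iterate
 * every vGPU currently marked active, typically with gvt->lock held to keep
 * the IDR stable:
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	mutex_lock(&gvt->lock);
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		handle_vgpu(vgpu);
 *	mutex_unlock(&gvt->lock);
 */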
469 | |
470 | static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, |
471 | u32 offset, u32 val, bool low) |
472 | { |
473 | u32 *pval; |
474 | |
	/* BAR offset should be 32-bit aligned */
476 | offset = rounddown(offset, 4); |
477 | pval = (u32 *)(vgpu_cfg_space(vgpu) + offset); |
478 | |
479 | if (low) { |
480 | /* |
481 | * only update bit 31 - bit 4, |
482 | * leave the bit 3 - bit 0 unchanged. |
483 | */ |
484 | *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0)); |
485 | } else { |
486 | *pval = val; |
487 | } |
488 | } |
489 | |
490 | int intel_gvt_init_vgpu_types(struct intel_gvt *gvt); |
491 | void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); |
492 | |
493 | struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt); |
494 | void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu); |
495 | int intel_gvt_create_vgpu(struct intel_vgpu *vgpu, |
496 | const struct intel_vgpu_config *conf); |
497 | void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); |
498 | void intel_gvt_release_vgpu(struct intel_vgpu *vgpu); |
499 | void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, |
500 | intel_engine_mask_t engine_mask); |
501 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); |
502 | void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); |
503 | void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); |
504 | |
505 | int intel_gvt_set_opregion(struct intel_vgpu *vgpu); |
506 | int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num); |
507 | |
508 | /* validating GM functions */ |
509 | #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ |
510 | ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \ |
511 | (gmadr <= vgpu_aperture_gmadr_end(vgpu))) |
512 | |
513 | #define vgpu_gmadr_is_hidden(vgpu, gmadr) \ |
514 | ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \ |
515 | (gmadr <= vgpu_hidden_gmadr_end(vgpu))) |
516 | |
517 | #define vgpu_gmadr_is_valid(vgpu, gmadr) \ |
518 | ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \ |
519 | (vgpu_gmadr_is_hidden(vgpu, gmadr)))) |
520 | |
521 | #define gvt_gmadr_is_aperture(gvt, gmadr) \ |
522 | ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \ |
523 | (gmadr <= gvt_aperture_gmadr_end(gvt))) |
524 | |
525 | #define gvt_gmadr_is_hidden(gvt, gmadr) \ |
526 | ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \ |
527 | (gmadr <= gvt_hidden_gmadr_end(gvt))) |
528 | |
529 | #define gvt_gmadr_is_valid(gvt, gmadr) \ |
530 | (gvt_gmadr_is_aperture(gvt, gmadr) || \ |
531 | gvt_gmadr_is_hidden(gvt, gmadr)) |
532 | |
533 | bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size); |
534 | int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr); |
535 | int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr); |
536 | int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, |
537 | unsigned long *h_index); |
538 | int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, |
539 | unsigned long *g_index); |
540 | |
541 | void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, |
542 | bool primary); |
543 | void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); |
544 | |
545 | int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, |
546 | void *p_data, unsigned int bytes); |
547 | |
548 | int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, |
549 | void *p_data, unsigned int bytes); |
550 | |
551 | void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected); |
552 | |
553 | static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar) |
554 | { |
	/* BARs are emulated as 64-bit. */
556 | return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) & |
557 | PCI_BASE_ADDRESS_MEM_MASK; |
558 | } |
559 | |
560 | void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); |
561 | int intel_vgpu_init_opregion(struct intel_vgpu *vgpu); |
562 | int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa); |
563 | |
564 | int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); |
565 | void populate_pvinfo_page(struct intel_vgpu *vgpu); |
566 | |
567 | int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); |
568 | void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason); |
569 | void intel_vgpu_detach_regions(struct intel_vgpu *vgpu); |
570 | |
571 | enum { |
572 | GVT_FAILSAFE_UNSUPPORTED_GUEST, |
573 | GVT_FAILSAFE_INSUFFICIENT_RESOURCE, |
574 | GVT_FAILSAFE_GUEST_ERR, |
575 | }; |
576 | |
577 | static inline void mmio_hw_access_pre(struct intel_gt *gt) |
578 | { |
	intel_runtime_pm_get(gt->uncore->rpm);
580 | } |
581 | |
582 | static inline void mmio_hw_access_post(struct intel_gt *gt) |
583 | { |
	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
585 | } |
586 | |
587 | /** |
 * intel_gvt_mmio_set_accessed - mark an MMIO as accessed
589 | * @gvt: a GVT device |
590 | * @offset: register offset |
591 | * |
592 | */ |
593 | static inline void intel_gvt_mmio_set_accessed( |
594 | struct intel_gvt *gvt, unsigned int offset) |
595 | { |
596 | gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED; |
597 | } |
598 | |
599 | /** |
 * intel_gvt_mmio_is_cmd_accessible - check if an MMIO can be accessed by GPU commands
601 | * @gvt: a GVT device |
602 | * @offset: register offset |
603 | * |
604 | * Returns: |
605 | * True if an MMIO is able to be accessed by GPU commands |
606 | */ |
607 | static inline bool intel_gvt_mmio_is_cmd_accessible( |
608 | struct intel_gvt *gvt, unsigned int offset) |
609 | { |
610 | return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS; |
611 | } |
612 | |
613 | /** |
 * intel_gvt_mmio_set_cmd_accessible -
 * mark an MMIO as accessible by GPU commands
616 | * @gvt: a GVT device |
617 | * @offset: register offset |
618 | * |
619 | */ |
620 | static inline void intel_gvt_mmio_set_cmd_accessible( |
621 | struct intel_gvt *gvt, unsigned int offset) |
622 | { |
623 | gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS; |
624 | } |
625 | |
626 | /** |
 * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed unaligned
628 | * @gvt: a GVT device |
629 | * @offset: register offset |
630 | * |
631 | */ |
632 | static inline bool intel_gvt_mmio_is_unalign( |
633 | struct intel_gvt *gvt, unsigned int offset) |
634 | { |
635 | return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN; |
636 | } |
637 | |
638 | /** |
 * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
640 | * @gvt: a GVT device |
641 | * @offset: register offset |
642 | * |
643 | * Returns: |
 * True if the MMIO has a mode mask in its higher 16 bits, false otherwise.
645 | * |
646 | */ |
647 | static inline bool intel_gvt_mmio_has_mode_mask( |
648 | struct intel_gvt *gvt, unsigned int offset) |
649 | { |
650 | return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; |
651 | } |
652 | |
653 | /** |
 * intel_gvt_mmio_is_sr_in_ctx -
 * check if an MMIO has the F_SR_IN_CTX flag
656 | * @gvt: a GVT device |
657 | * @offset: register offset |
658 | * |
659 | * Returns: |
 * True if the MMIO has the F_SR_IN_CTX flag set, false otherwise.
661 | * |
662 | */ |
663 | static inline bool intel_gvt_mmio_is_sr_in_ctx( |
664 | struct intel_gvt *gvt, unsigned int offset) |
665 | { |
666 | return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX; |
667 | } |
668 | |
669 | /** |
 * intel_gvt_mmio_set_sr_in_ctx -
 * mark an MMIO as being in GVT's mmio save-restore list and also
 * in the hardware logical context image
673 | * @gvt: a GVT device |
674 | * @offset: register offset |
675 | * |
676 | */ |
677 | static inline void intel_gvt_mmio_set_sr_in_ctx( |
678 | struct intel_gvt *gvt, unsigned int offset) |
679 | { |
680 | gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX; |
681 | } |
682 | |
683 | void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); |
684 | /** |
 * intel_gvt_mmio_set_cmd_write_patch -
 * mark an MMIO whose cmd write needs to be patched
688 | * @gvt: a GVT device |
689 | * @offset: register offset |
690 | * |
691 | */ |
692 | static inline void intel_gvt_mmio_set_cmd_write_patch( |
693 | struct intel_gvt *gvt, unsigned int offset) |
694 | { |
695 | gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH; |
696 | } |
697 | |
698 | /** |
 * intel_gvt_mmio_is_cmd_write_patch - check if an MMIO's cmd write needs to
 * be patched
701 | * @gvt: a GVT device |
702 | * @offset: register offset |
703 | * |
704 | * Returns: |
 * True if a GPU command write to the MMIO should be patched
706 | */ |
707 | static inline bool intel_gvt_mmio_is_cmd_write_patch( |
708 | struct intel_gvt *gvt, unsigned int offset) |
709 | { |
710 | return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH; |
711 | } |
712 | |
713 | /** |
714 | * intel_gvt_read_gpa - copy data from GPA to host data buffer |
715 | * @vgpu: a vGPU |
716 | * @gpa: guest physical address |
717 | * @buf: host data buffer |
718 | * @len: data length |
719 | * |
720 | * Returns: |
721 | * Zero on success, negative error code if failed. |
722 | */ |
723 | static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, |
724 | void *buf, unsigned long len) |
725 | { |
726 | if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) |
727 | return -ESRCH; |
	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
729 | } |
730 | |
731 | /** |
732 | * intel_gvt_write_gpa - copy data from host data buffer to GPA |
733 | * @vgpu: a vGPU |
734 | * @gpa: guest physical address |
735 | * @buf: host data buffer |
736 | * @len: data length |
737 | * |
738 | * Returns: |
739 | * Zero on success, negative error code if failed. |
740 | */ |
741 | static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu, |
742 | unsigned long gpa, void *buf, unsigned long len) |
743 | { |
744 | if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) |
745 | return -ESRCH; |
	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
747 | } |
748 | |
749 | void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); |
750 | void intel_gvt_debugfs_init(struct intel_gvt *gvt); |
751 | void intel_gvt_debugfs_clean(struct intel_gvt *gvt); |
752 | |
753 | int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn); |
754 | int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn); |
755 | int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr); |
756 | int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, |
757 | unsigned long size, dma_addr_t *dma_addr); |
758 | void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, |
759 | dma_addr_t dma_addr); |
760 | |
761 | #include "trace.h" |
762 | |
763 | #endif |
764 | |