/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_TIMELINE_TYPES_H__
#define __I915_TIMELINE_TYPES_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#include "i915_active_types.h"

struct i915_vma;
struct i915_syncmap;
struct intel_gt;

struct intel_timeline {
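	/*
	 * fence_context is the dma-fence context shared by all requests
	 * emitted along this timeline; seqno tracks the most recent
	 * breadcrumb value handed out on it.
	 */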
	u64 fence_context;
	u32 seqno;

	struct mutex mutex; /* protects the flow of requests */

	/*
	 * pin_count and active_count track essentially the same thing:
	 * How many requests are in flight or may be under construction.
	 *
	 * We need two distinct counters so that we can assign different
	 * lifetimes to the events for different use-cases. For example,
	 * we want to permanently keep the timeline pinned for the kernel
	 * context so that we can issue requests at any time without having
	 * to acquire space in the GGTT. However, we want to keep tracking
	 * the activity (to be able to detect when we become idle) along that
	 * permanently pinned timeline and so end up requiring two counters.
	 *
	 * Note that the active_count is protected by the intel_timeline.mutex,
	 * but the pin_count is protected by a combination of serialisation
	 * from the intel_context caller plus internal atomicity.
	 */
	atomic_t pin_count;
	atomic_t active_count;

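	/*
	 * Hardware status page (HWSP) backing this timeline: hwsp_map is the
	 * kernel mapping of the page, hwsp_ggtt its GGTT binding, and
	 * hwsp_offset the location within the page at which the GPU writes
	 * the breadcrumb seqno that is read back through hwsp_seqno.
	 */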
	void *hwsp_map;
	const u32 *hwsp_seqno;
	struct i915_vma *hwsp_ggtt;
	u32 hwsp_offset;

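	/*
	 * Whether requests on this timeline emit an initial breadcrumb at the
	 * start of their payload, in addition to the final breadcrumb.
	 */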
	bool has_initial_breadcrumb;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head requests;

	/*
	 * Contains an RCU guarded pointer to the last request. No reference
	 * is held to the request; users must carefully acquire a reference
	 * to the request using i915_active_fence_get(), or manage the RCU
	 * protection themselves (cf the i915_active_fence API).
	 */
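	/*
	 * A minimal sketch of the access pattern described above, assuming
	 * the i915_active_fence_get() helper from i915_active.h:
	 *
	 *	struct dma_fence *fence;
	 *
	 *	fence = i915_active_fence_get(&tl->last_request);
	 *	if (fence) {
	 *		... inspect or wait on the fence ...
	 *		dma_fence_put(fence);
	 *	}
	 */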
	struct i915_active_fence last_request;

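	/* Activity tracking: requests in flight along this timeline */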
	struct i915_active active;

	/** A chain of completed timelines ready for early retirement. */
	struct intel_timeline *retire;

	/**
	 * We track the most recent seqno that we wait on in every context so
	 * that we only have to emit a new await and dependency on a more
	 * recent sync point. As the contexts may be executed out-of-order, we
	 * have to track each individually and cannot rely on an absolute
	 * global_seqno. When we know that all tracked fences are completed
	 * (i.e. when the driver is idle), we know that the syncmap is
	 * redundant and we can discard it without loss of generality.
	 */
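	/*
	 * Roughly how the map is consulted before emitting an await (a sketch
	 * using the helpers declared in i915_syncmap.h; signatures assumed):
	 *
	 *	if (!i915_syncmap_is_later(&tl->sync, fence->context, fence->seqno)) {
	 *		... emit the await ...
	 *		i915_syncmap_set(&tl->sync, fence->context, fence->seqno);
	 *	}
	 */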
	struct i915_syncmap *sync;

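	/* Link in the owning GT's list of timelines, and the GT itself */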
	struct list_head link;
	struct intel_gt *gt;

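	/* Link in a per-engine list of timelines */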
	struct list_head engine_link;

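	/* Reference count; the final free is deferred through the RCU head */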
	struct kref kref;
	struct rcu_head rcu;
};

#endif /* __I915_TIMELINE_TYPES_H__ */