1 | /* |
2 | * Copyright 2011 Red Hat Inc. |
3 | * All Rights Reserved. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the |
7 | * "Software"), to deal in the Software without restriction, including |
8 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * the following conditions: |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
20 | * |
21 | * The above copyright notice and this permission notice (including the |
22 | * next paragraph) shall be included in all copies or substantial portions |
23 | * of the Software. |
24 | * |
25 | */ |
26 | /* |
27 | * Authors: |
28 | * Jerome Glisse <glisse@freedesktop.org> |
29 | */ |
30 | /* Algorithm: |
31 | * |
32 | * We store the last allocated bo in "hole", we always try to allocate |
33 | * after the last allocated bo. Principle is that in a linear GPU ring |
 * progression whatever is after last is the oldest bo we allocated and thus
35 | * the first one that should no longer be in use by the GPU. |
36 | * |
37 | * If it's not the case we skip over the bo after last to the closest |
 * done bo if one exists. If none exists and we are not asked to
39 | * block we report failure to allocate. |
40 | * |
41 | * If we are asked to block we wait on all the oldest fence of all |
42 | * rings. We just wait for any of those fence to complete. |
43 | */ |
44 | |
45 | #include "radeon.h" |
46 | |
47 | int radeon_sa_bo_manager_init(struct radeon_device *rdev, |
48 | struct radeon_sa_manager *sa_manager, |
49 | unsigned int size, u32 sa_align, u32 domain, |
50 | u32 flags) |
51 | { |
52 | int r; |
53 | |
54 | r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, kernel: true, |
55 | domain, flags, NULL, NULL, bo_ptr: &sa_manager->bo); |
56 | if (r) { |
57 | dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n" , r); |
58 | return r; |
59 | } |
60 | |
61 | sa_manager->domain = domain; |
62 | |
63 | drm_suballoc_manager_init(sa_manager: &sa_manager->base, size, align: sa_align); |
64 | |
65 | return r; |
66 | } |
67 | |
68 | void radeon_sa_bo_manager_fini(struct radeon_device *rdev, |
69 | struct radeon_sa_manager *sa_manager) |
70 | { |
71 | drm_suballoc_manager_fini(sa_manager: &sa_manager->base); |
72 | radeon_bo_unref(bo: &sa_manager->bo); |
73 | } |
74 | |
75 | int radeon_sa_bo_manager_start(struct radeon_device *rdev, |
76 | struct radeon_sa_manager *sa_manager) |
77 | { |
78 | int r; |
79 | |
80 | if (sa_manager->bo == NULL) { |
81 | dev_err(rdev->dev, "no bo for sa manager\n" ); |
82 | return -EINVAL; |
83 | } |
84 | |
85 | /* map the buffer */ |
86 | r = radeon_bo_reserve(bo: sa_manager->bo, no_intr: false); |
87 | if (r) { |
88 | dev_err(rdev->dev, "(%d) failed to reserve manager bo\n" , r); |
89 | return r; |
90 | } |
91 | r = radeon_bo_pin(bo: sa_manager->bo, domain: sa_manager->domain, gpu_addr: &sa_manager->gpu_addr); |
92 | if (r) { |
93 | radeon_bo_unreserve(bo: sa_manager->bo); |
94 | dev_err(rdev->dev, "(%d) failed to pin manager bo\n" , r); |
95 | return r; |
96 | } |
97 | r = radeon_bo_kmap(bo: sa_manager->bo, ptr: &sa_manager->cpu_ptr); |
98 | radeon_bo_unreserve(bo: sa_manager->bo); |
99 | return r; |
100 | } |
101 | |
102 | int radeon_sa_bo_manager_suspend(struct radeon_device *rdev, |
103 | struct radeon_sa_manager *sa_manager) |
104 | { |
105 | int r; |
106 | |
107 | if (sa_manager->bo == NULL) { |
108 | dev_err(rdev->dev, "no bo for sa manager\n" ); |
109 | return -EINVAL; |
110 | } |
111 | |
112 | r = radeon_bo_reserve(bo: sa_manager->bo, no_intr: false); |
113 | if (!r) { |
114 | radeon_bo_kunmap(bo: sa_manager->bo); |
115 | radeon_bo_unpin(bo: sa_manager->bo); |
116 | radeon_bo_unreserve(bo: sa_manager->bo); |
117 | } |
118 | return r; |
119 | } |
120 | |
121 | int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager, |
122 | struct drm_suballoc **sa_bo, |
123 | unsigned int size, unsigned int align) |
124 | { |
125 | struct drm_suballoc *sa = drm_suballoc_new(sa_manager: &sa_manager->base, size, |
126 | GFP_KERNEL, intr: false, align); |
127 | |
128 | if (IS_ERR(ptr: sa)) { |
129 | *sa_bo = NULL; |
130 | return PTR_ERR(ptr: sa); |
131 | } |
132 | |
133 | *sa_bo = sa; |
134 | return 0; |
135 | } |
136 | |
137 | void radeon_sa_bo_free(struct drm_suballoc **sa_bo, |
138 | struct radeon_fence *fence) |
139 | { |
140 | if (sa_bo == NULL || *sa_bo == NULL) { |
141 | return; |
142 | } |
143 | |
144 | if (fence) |
145 | drm_suballoc_free(sa: *sa_bo, fence: &fence->base); |
146 | else |
147 | drm_suballoc_free(sa: *sa_bo, NULL); |
148 | |
149 | *sa_bo = NULL; |
150 | } |
151 | |
#if defined(CONFIG_DEBUG_FS)
/**
 * radeon_sa_bo_dump_debug_info - dump suballocator state to a seq_file
 * @sa_manager: suballocation manager to dump
 * @m: debugfs seq_file to print into
 *
 * Prints the manager's suballocation ranges via the generic DRM helper,
 * offsetting addresses by the manager's GPU base address. Debugfs only.
 */
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
161 | |