1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
 * Authors: Christian König
23 */
24
25#include <drm/drmP.h>
26#include "amdgpu.h"
27
/* Per-domain VRAM manager state, hung off ttm_mem_type_manager::priv. */
struct amdgpu_vram_mgr {
	struct drm_mm mm;	/* range allocator for the domain, in pages */
	spinlock_t lock;	/* protects mm; the counters below are atomic instead */
	atomic64_t usage;	/* bytes currently allocated from this domain */
	atomic64_t vis_usage;	/* bytes thereof inside CPU-visible VRAM */
};
34
35/**
36 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
37 *
38 * @man: TTM memory type manager
39 * @p_size: maximum size of VRAM
40 *
41 * Allocate and initialize the VRAM manager.
42 */
43static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
44 unsigned long p_size)
45{
46 struct amdgpu_vram_mgr *mgr;
47
48 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
49 if (!mgr)
50 return -ENOMEM;
51
52 drm_mm_init(&mgr->mm, 0, p_size);
53 spin_lock_init(&mgr->lock);
54 man->priv = mgr;
55 return 0;
56}
57
/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @man: TTM memory type manager
 *
 * Destroy and free the VRAM manager. Always returns 0; any ranges still
 * allocated inside it are flagged by drm_mm_takedown().
 */
66static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
67{
68 struct amdgpu_vram_mgr *mgr = man->priv;
69
70 spin_lock(&mgr->lock);
71 drm_mm_takedown(&mgr->mm);
72 spin_unlock(&mgr->lock);
73 kfree(mgr);
74 man->priv = NULL;
75 return 0;
76}
77
78/**
79 * amdgpu_vram_mgr_vis_size - Calculate visible node size
80 *
81 * @adev: amdgpu device structure
82 * @node: MM node structure
83 *
84 * Calculate how many bytes of the MM node are inside visible VRAM
85 */
86static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
87 struct drm_mm_node *node)
88{
89 uint64_t start = node->start << PAGE_SHIFT;
90 uint64_t end = (node->size + node->start) << PAGE_SHIFT;
91
92 if (start >= adev->gmc.visible_vram_size)
93 return 0;
94
95 return (end > adev->gmc.visible_vram_size ?
96 adev->gmc.visible_vram_size : end) - start;
97}
98
99/**
100 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
101 *
102 * @bo: &amdgpu_bo buffer object (must be in VRAM)
103 *
104 * Returns:
105 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
106 */
107u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
108{
109 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
110 struct ttm_mem_reg *mem = &bo->tbo.mem;
111 struct drm_mm_node *nodes = mem->mm_node;
112 unsigned pages = mem->num_pages;
113 u64 usage;
114
115 if (amdgpu_gmc_vram_full_visible(&adev->gmc))
116 return amdgpu_bo_size(bo);
117
118 if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
119 return 0;
120
121 for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
122 usage += amdgpu_vram_mgr_vis_size(adev, nodes);
123
124 return usage;
125}
126
127/**
128 * amdgpu_vram_mgr_virt_start - update virtual start address
129 *
130 * @mem: ttm_mem_reg to update
131 * @node: just allocated node
132 *
133 * Calculate a virtual BO start address to easily check if everything is CPU
134 * accessible.
135 */
136static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
137 struct drm_mm_node *node)
138{
139 unsigned long start;
140
141 start = node->start + node->size;
142 if (start > mem->num_pages)
143 start -= mem->num_pages;
144 else
145 start = 0;
146 mem->start = max(mem->start, start);
147}
148
149/**
150 * amdgpu_vram_mgr_new - allocate new ranges
151 *
152 * @man: TTM memory type manager
153 * @tbo: TTM BO we need this range for
154 * @place: placement flags and restrictions
155 * @mem: the resulting mem object
156 *
157 * Allocate VRAM for the given BO.
158 */
static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm *mm = &mgr->mm;
	struct drm_mm_node *nodes;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
	uint64_t usage = 0, vis_usage = 0;
	unsigned i;
	int r;

	/* No upper placement limit requested: use the whole domain. */
	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
	    amdgpu_vram_page_split == -1) {
		/* Contiguous BO (or splitting disabled via module param):
		 * everything must land in a single node.
		 */
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
		/* Split into chunks of at most pages_per_node pages; the
		 * chunk size must still honor the BO's page alignment.
		 */
		pages_per_node = max((uint32_t)amdgpu_vram_page_split,
				     mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	/* Worst-case node array; zeroed so unused entries stay inert. */
	nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
			       GFP_KERNEL | __GFP_ZERO);
	if (!nodes)
		return -ENOMEM;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	spin_lock(&mgr->lock);
	/* First pass: greedily grab large power-of-two sized nodes while at
	 * least pages_per_node pages remain. A failed insert (fragmentation)
	 * just breaks out; the second pass retries with smaller chunks.
	 */
	for (i = 0; pages_left >= pages_per_node; ++i) {
		unsigned long pages = rounddown_pow_of_two(pages_left);

		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
						pages_per_node, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			break;

		usage += nodes[i].size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}

	/* Second pass: fill the remainder with chunks of at most
	 * pages_per_node pages; here a failed insert is fatal.
	 */
	for (; pages_left; ++i) {
		unsigned long pages = min(pages_left, pages_per_node);
		uint32_t alignment = mem->page_alignment;

		/* Full-size chunks are aligned to the chunk size itself. */
		if (pages == pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i],
						pages, alignment, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			goto error;

		usage += nodes[i].size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}
	spin_unlock(&mgr->lock);

	/* Commit accounting only after the whole allocation succeeded. */
	atomic64_add(usage, &mgr->usage);
	atomic64_add(vis_usage, &mgr->vis_usage);

	mem->mm_node = nodes;

	return 0;

error:
	/* Unwind every node inserted so far, still under the lock. */
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);

	kvfree(nodes);
	/* -ENOSPC is mapped to 0 with mem->mm_node left NULL, which tells
	 * TTM "no space, try eviction" rather than a hard failure.
	 */
	return r == -ENOSPC ? 0 : r;
}
253
/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm_node *nodes = mem->mm_node;
	uint64_t usage = 0, vis_usage = 0;
	unsigned pages = mem->num_pages;

	/* Nothing was ever allocated (or already freed). */
	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		/* NOTE(review): nodes->start/size are read here after the
		 * node was removed from the drm_mm — this relies on
		 * drm_mm_remove_node() not clearing those fields; confirm
		 * against the drm_mm implementation before reordering.
		 */
		usage += nodes->size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
		++nodes;
	}
	spin_unlock(&mgr->lock);

	/* Undo the accounting done in amdgpu_vram_mgr_new(). */
	atomic64_sub(usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	kvfree(mem->mm_node);
	mem->mm_node = NULL;
}
292
293/**
294 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
295 *
296 * @man: TTM memory type manager
297 *
298 * Returns how many bytes are used in this domain.
299 */
300uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
301{
302 struct amdgpu_vram_mgr *mgr = man->priv;
303
304 return atomic64_read(&mgr->usage);
305}
306
307/**
308 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
309 *
310 * @man: TTM memory type manager
311 *
312 * Returns how many bytes are used in the visible part of VRAM
313 */
314uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
315{
316 struct amdgpu_vram_mgr *mgr = man->priv;
317
318 return atomic64_read(&mgr->vis_usage);
319}
320
321/**
322 * amdgpu_vram_mgr_debug - dump VRAM table
323 *
324 * @man: TTM memory type manager
325 * @printer: DRM printer to use
326 *
327 * Dump the table content using printk.
328 */
329static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
330 struct drm_printer *printer)
331{
332 struct amdgpu_vram_mgr *mgr = man->priv;
333
334 spin_lock(&mgr->lock);
335 drm_mm_print(&mgr->mm, printer);
336 spin_unlock(&mgr->lock);
337
338 drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
339 man->size, amdgpu_vram_mgr_usage(man) >> 20,
340 amdgpu_vram_mgr_vis_usage(man) >> 20);
341}
342
/* TTM memory-manager callbacks wiring the VRAM domain to this allocator. */
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
	.init = amdgpu_vram_mgr_init,		/* create manager + drm_mm */
	.takedown = amdgpu_vram_mgr_fini,	/* destroy manager */
	.get_node = amdgpu_vram_mgr_new,	/* allocate VRAM ranges */
	.put_node = amdgpu_vram_mgr_del,	/* free VRAM ranges */
	.debug = amdgpu_vram_mgr_debug		/* dump allocator state */
};
350