/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23#ifndef __AMDGPU_CTX_H__
24#define __AMDGPU_CTX_H__
25
26#include "amdgpu_ring.h"
27
28struct drm_device;
29struct drm_file;
30struct amdgpu_fpriv;
31
/**
 * struct amdgpu_ctx_entity - per-context state for one scheduler entity
 *
 * One of these exists for each (hw_ip, ring) entity a context submits to;
 * see amdgpu_ctx::entities and amdgpu_ctx_get_entity() below.
 *
 * @sequence: 64-bit submission sequence number for this entity; presumably
 *            the next value handed out to amdgpu_ctx_add_fence() — confirm
 *            against amdgpu_ctx.c.
 * @fences:   array of per-submission fence pointers, indexed by sequence
 *            number (array size is not visible in this header).
 * @entity:   the DRM GPU scheduler entity jobs are pushed through.
 */
struct amdgpu_ctx_entity {
	uint64_t		sequence;
	struct dma_fence	**fences;
	struct drm_sched_entity	entity;
};
37
/**
 * struct amdgpu_ctx - a userspace command-submission context
 *
 * Created and destroyed via amdgpu_ctx_ioctl(); looked up by handle with
 * amdgpu_ctx_get() and released with amdgpu_ctx_put().
 *
 * @reset_counter:       snapshot of the device reset counter, used to detect
 *                       GPU resets that happened after this context was
 *                       created — presumably compared in the query ioctl;
 *                       confirm against amdgpu_ctx.c.
 * @reset_counter_query: reset counter value reported at the last user query
 *                       (NOTE(review): inferred from the name — verify).
 * @vram_lost_counter:   snapshot used to detect VRAM loss across resets.
 * @fences:              backing storage for the per-entity fence arrays
 *                       (amdgpu_ctx_entity::fences) — TODO confirm in the
 *                       allocation code.
 * @entities:            per-hw-IP arrays of scheduler entities; indexed by
 *                       HW IP type, AMDGPU_HW_IP_NUM entries.
 * @init_priority:       scheduling priority requested at context creation.
 * @override_priority:   priority forced by amdgpu_ctx_priority_override().
 * @guilty:              set when this context caused a GPU hang (atomic so
 *                       the reset path can flag it without the locks).
 */
struct amdgpu_ctx {
	struct kref		refcount;	/* lifetime: get/put */
	struct amdgpu_device	*adev;		/* owning device */
	unsigned		reset_counter;
	unsigned		reset_counter_query;
	uint32_t		vram_lost_counter;
	spinlock_t		ring_lock;	/* protects per-entity fence state */
	struct dma_fence	**fences;
	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM];
	bool			preamble_presented;
	enum drm_sched_priority	init_priority;
	enum drm_sched_priority	override_priority;
	struct mutex		lock;
	atomic_t		guilty;
};
53
/**
 * struct amdgpu_ctx_mgr - per-open-file context manager
 *
 * Tracks all contexts created through one DRM file descriptor; presumably
 * embedded in struct amdgpu_fpriv — confirm against amdgpu.h.
 *
 * @adev:        the device these contexts belong to.
 * @lock:        serializes access to @ctx_handles.
 * @ctx_handles: idr mapping userspace context handles to struct amdgpu_ctx.
 */
struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct mutex		lock;
	/* protected by lock */
	struct idr		ctx_handles;
};
60
/* Number of scheduler entities per HW IP type; defined in amdgpu_ctx.c. */
extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];

/*
 * Look up a context by userspace handle. Presumably takes a kref reference
 * (ctx has a struct kref) that the caller must drop with amdgpu_ctx_put();
 * confirm against amdgpu_ctx.c.
 */
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

/*
 * Resolve (hw_ip, instance, ring) to the scheduler entity of @ctx that
 * submissions for that ring should use. Returns 0 on success, negative
 * errno otherwise.
 */
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity);

/*
 * Record @fence as the entity's next submission; the sequence number
 * assigned to it is returned through @seq. Pairs with
 * amdgpu_ctx_get_fence() for later lookup by sequence number.
 */
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq);

/* Force a new scheduling priority on all entities of @ctx. */
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority);

/* DRM_IOCTL_AMDGPU_CTX entry point (alloc/free/query of contexts). */
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

/*
 * Wait for the fence of the entity's previous submission — presumably used
 * to throttle back-to-back submissions; confirm against the CS path.
 */
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity);

/*
 * Context-manager lifecycle: init on file open; entity_flush/entity_fini
 * tear down scheduler entities before fini releases the remaining contexts.
 */
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
87
88#endif
89