// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

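/* Set up the mmap info for a user mapped queue buffer: copy the
 * offset/size info to userspace and add the mapping to the device's
 * pending_mmaps list until the user calls mmap(). For kernel-only
 * queues (outbuf == NULL) no mmap info is created and *ip_p is NULL.
 */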
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p)
{
	int err;
	struct rxe_mmap_info *ip = NULL;

	if (outbuf) {
		ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
		if (IS_ERR(ip)) {
			err = PTR_ERR(ip);
			goto err1;
		}

		if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
			err = -EFAULT;
			goto err2;
		}

		spin_lock_bh(&rxe->pending_lock);
		list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
		spin_unlock_bh(&rxe->pending_lock);
	}

	*ip_p = ip;

	return 0;

err2:
	kfree(ip);
err1:
	return err;
}

inline void rxe_queue_reset(struct rxe_queue *q)
{
	/* the queue consists of a management header followed by the
	 * memory of the actual queue; see "struct rxe_queue_buf" in
	 * rxe_queue.h. Reset only the queue data, not the header.
	 */
	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}

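/* Allocate a queue with at least *num_elem usable slots. The slot
 * count is rounded up to a power of two so that ring indices can wrap
 * with a simple mask; the resulting capacity is passed back through
 * *num_elem. A minimal usage sketch (sizes and names illustrative only):
 *
 *	int n = 64;
 *	struct rxe_queue *q = rxe_queue_init(rxe, &n, elem_size,
 *					     QUEUE_TYPE_FROM_CLIENT);
 *	if (!q)
 *		return -ENOMEM;
 */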
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
				 unsigned int elem_size, enum queue_type type)
{
	struct rxe_queue *q;
	size_t buf_size;
	unsigned int num_slots;

	/* num_elem == 0 is allowed, but uninteresting */
	if (*num_elem < 0)
		return NULL;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->rxe = rxe;
	q->type = type;

	/* used in resize, only need to copy used part of queue */
	q->elem_size = elem_size;

	/* pad element up to at least a cacheline and always a power of 2 */
	if (elem_size < cache_line_size())
		elem_size = cache_line_size();
	elem_size = roundup_pow_of_two(elem_size);

	q->log2_elem_size = order_base_2(elem_size);

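	/* one extra slot is reserved so that a full ring (producer just
	 * behind consumer) can be distinguished from an empty one
	 * (producer == consumer)
	 */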
	num_slots = *num_elem + 1;
	num_slots = roundup_pow_of_two(num_slots);
	q->index_mask = num_slots - 1;

	buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

	q->buf = vmalloc_user(buf_size);
	if (!q->buf)
		goto err2;

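	/* mirror the element size and index mask into the shared buffer
	 * header so that a userspace mapping can index the ring the same
	 * way the kernel does
	 */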
	q->buf->log2_elem_size = q->log2_elem_size;
	q->buf->index_mask = q->index_mask;

	q->buf_size = buf_size;

	*num_elem = num_slots - 1;
	return q;

err2:
	kfree(q);
	return NULL;
}

/* Copy the elements from the original q to the new q, then swap the
 * contents of the two q headers. That way anyone still holding a
 * pointer to q continues to see a valid queue.
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
			 unsigned int num_elem)
{
	enum queue_type type = q->type;
	u32 new_prod;
	u32 prod;
	u32 cons;

	if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type)))
		return -EINVAL;

	new_prod = queue_get_producer(new_q, type);
	prod = queue_get_producer(q, type);
	cons = queue_get_consumer(q, type);

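	/* copy each occupied slot to the new ring; (prod - cons) masked
	 * by index_mask is the number of elements still in the old queue
	 */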
	while ((prod - cons) & q->index_mask) {
		memcpy(queue_addr_from_index(new_q, new_prod),
		       queue_addr_from_index(q, cons), new_q->elem_size);
		new_prod = queue_next_index(new_q, new_prod);
		cons = queue_next_index(q, cons);
	}

	new_q->buf->producer_index = new_prod;
	q->buf->consumer_index = cons;

	/* update private index copies */
	if (type == QUEUE_TYPE_TO_CLIENT)
		new_q->index = new_q->buf->producer_index;
	else
		q->index = q->buf->consumer_index;

	/* exchange rxe_queue headers */
	swap(*q, *new_q);

	return 0;
}

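/* Resize a queue in place: allocate a new queue, set up its mmap info,
 * then copy the contents across and swap the headers under the queue
 * locks so that existing pointers to q remain valid throughout.
 */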
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf, spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long producer_flags;
	unsigned long consumer_flags;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

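	/* freeze both ends of the queue while its contents move; the
	 * producer lock may be NULL when the caller does not use a
	 * separate lock for the producer side
	 */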
	spin_lock_irqsave(consumer_lock, consumer_flags);

	if (producer_lock) {
		spin_lock_irqsave(producer_lock, producer_flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, producer_flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, consumer_flags);

	/* on success the headers were swapped, so this frees the old
	 * buffer; on failure it frees the unused new one
	 */
	rxe_queue_cleanup(new_q);
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}

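/* Free a queue. If the buffer was handed out via mmap info, drop the
 * reference and let rxe_mmap_release free it once the last mapping is
 * gone; otherwise free the buffer directly.
 */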
void rxe_queue_cleanup(struct rxe_queue *q)
{
	if (q->ip)
		kref_put(&q->ip->ref, rxe_mmap_release);
	else
		vfree(q->buf);

	kfree(q);
}