// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/io.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

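/*
 * Each packet on the ring is followed by an 8-byte trailer: the prev_indices
 * value laid down by hv_ringbuffer_write() below.
 */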
#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DoS attacks that
 * can be triggered if the host receives interrupts when it is not
 * expecting them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
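/*
 * Illustrative trace of the rule above (numbers chosen for the example only):
 * suppose the guest->host ring is empty, so read_index == write_index ==
 * 0x1000.  A write that starts at old_write == 0x1000 is the empty ->
 * non-empty transition, so hv_signal_on_write() below raises the event.  A
 * second write starting at old_write == 0x1200 before the host has drained
 * anything sees read_index (0x1000) != old_write and stays silent, relying on
 * guarantee (2) for the host to pick up the new data in the same read loop.
 */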

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed into a u64: the write index in the
 * upper 32 bits (the lower 32 bits, where the read index would go, are
 * left zero).
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
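
/*
 * Illustration of the wrap-around behaviour above: with ring_datasize ==
 * 0x4000, a copy of 0x200 bytes starting at offset 0x3F00 runs past the end
 * of the data area.  The memcpy() is still a single, linear copy because
 * hv_ringbuffer_init() maps the data pages twice back to back; only the
 * returned offset is wrapped, to 0x3F00 + 0x200 - 0x4000 == 0x100.
 */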

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get number of bytes available to read and to write to
 * for the specified ring buffer
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they changed */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}
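
/*
 * Worked example of the calculation above: with dsize == 0x4000,
 * read_index == 0x0100 and write_index == 0x0300, the writer owns
 * dsize - (0x0300 - 0x0100) == 0x3E00 bytes and the reader owns the
 * remaining 0x0200 bytes.  If write_index has wrapped behind read_index
 * (say write 0x0080, read 0x0300), *write is simply 0x0300 - 0x0080.
 */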

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
	struct page **pages_wraparound;
	int i;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1,
				   sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] =
			&pages[i % (page_cnt - 1) + 1];
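
	/*
	 * Illustration of the layout just built: for page_cnt == 4 the array
	 * holds { hdr, d1, d2, d3, d1, d2, d3 }, i.e. the header page once
	 * followed by the data pages twice, so a linear access that runs off
	 * the end of the data area lands back at its start in the alias
	 * mapping.
	 */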

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
		     pgprot_decrypted(PAGE_KERNEL));

	kfree(pages_wraparound);
	if (!ring_info->ring_buffer)
		return -ENOMEM;

	/*
	 * Ensure the header page is zeroed since
	 * encryption status may have changed.
	 */
	memset(ring_info->ring_buffer, 0, HV_HYP_PAGE_SIZE);

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	/* Initialize buffer that holds copies of incoming packets */
	if (max_pkt_size) {
		ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
		if (!ring_info->pkt_buffer)
			return -ENOMEM;
		ring_info->pkt_buffer_size = max_pkt_size;
	}

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	kfree(ring_info->pkt_buffer);
	ring_info->pkt_buffer = NULL;
	ring_info->pkt_buffer_size = 0;
}

/*
 * Check if the ring buffer spinlock is available to take or not; used in
 * atomic contexts, like the panic path (see the Hyper-V framebuffer driver).
 */

bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rinfo = &channel->outbound;

	return spin_is_locked(&rinfo->ring_lock);
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid, u64 *trans_id)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;
	struct vmpacket_descriptor *desc = kv_list[0].iov_base;
	u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we would think the ring buffer
	 * is empty since the read index would equal the write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/*
	 * Allocate the request ID after the data has been copied into the
	 * ring buffer. Once this request ID is allocated, the completion
	 * path could find the data and free it.
	 */

	if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
		if (channel->next_request_id_callback != NULL) {
			rqst_id = channel->next_request_id_callback(channel, requestid);
			if (rqst_id == VMBUS_RQST_ERROR) {
				spin_unlock_irqrestore(&outring_info->ring_lock, flags);
				return -EAGAIN;
			}
		}
	}
	desc = hv_get_ring_buffer(outring_info) + old_write;
	__trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
	/*
	 * Ensure the compiler doesn't generate code that reads the value of
	 * the transaction ID from the ring buffer, which is shared with the
	 * Hyper-V host and subject to being changed at any time.
	 */
	WRITE_ONCE(desc->trans_id, __trans_id);
	if (trans_id)
		*trans_id = __trans_id;

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind) {
		if (rqst_id != VMBUS_NO_RQSTOR) {
			/* Reclaim request ID to avoid leak of IDs */
			if (channel->request_addr_callback != NULL)
				channel->request_addr_callback(channel, rqst_id);
		}
		return -ENODEV;
	}

	return 0;
}
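
/*
 * Note on the layout hv_ringbuffer_write() produces: the caller's kvec
 * entries are laid down back to back, followed by the 8-byte prev_indices
 * trailer, and only then is the new write_index published after a full
 * barrier.  Drivers normally reach this function through wrappers such as
 * vmbus_sendpacket(), which build the vmpacket_descriptor and kvec list.
 */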

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is returned when the ring is empty (not even a
		 * packet header); drivers are expected to check
		 * buffer_actual_len instead.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc;

	/*
	 * The Hyper-V host writes the packet data, then uses
	 * store_release() to update the write_index. Use load_acquire()
	 * here to prevent loads of the packet data from being re-ordered
	 * before the read of the write_index and potentially getting
	 * stale data.
	 */
	write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
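
/*
 * Worked example of the calculation above: with ring_datasize == 0x4000,
 * priv_read_index == 0x3E00 and write_index == 0x0200, the iterator still
 * has (0x4000 - 0x3E00) + 0x0200 == 0x0400 bytes ahead of it, even though
 * the raw indices suggest the writer is "behind" the reader.
 */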

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc, *desc_copy;
	u32 bytes_avail, pkt_len, pkt_offset;

	hv_debug_delay_test(channel, MESSAGE_DELAY);

	bytes_avail = hv_pkt_iter_avail(rbi);
	if (bytes_avail < sizeof(struct vmpacket_descriptor))
		return NULL;
	bytes_avail = min(rbi->pkt_buffer_size, bytes_avail);

	desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	/*
	 * Ensure the compiler does not use references to incoming Hyper-V values (which
	 * could change at any moment) when reading local variables later in the code
	 */
	pkt_len = READ_ONCE(desc->len8) << 3;
	pkt_offset = READ_ONCE(desc->offset8) << 3;

	/*
	 * If pkt_len is invalid, set it to the smaller of hv_pkt_iter_avail() and
	 * rbi->pkt_buffer_size
	 */
	if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
		pkt_len = bytes_avail;

	/*
	 * If pkt_offset is invalid, arbitrarily set it to
	 * the size of vmpacket_descriptor
	 */
	if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
		pkt_offset = sizeof(struct vmpacket_descriptor);

	/* Copy the Hyper-V packet out of the ring buffer */
	desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
	memcpy(desc_copy, desc, pkt_len);

	/*
	 * Hyper-V could still change len8 and offset8 after the earlier read.
	 * Ensure that desc_copy has legal values for len8 and offset8 that
	 * are consistent with the copy we just made
	 */
	desc_copy->len8 = pkt_len >> 3;
	desc_copy->offset8 = pkt_offset >> 3;

	return desc_copy;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. Returns NULL when no further packets are available.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	hv_debug_delay_test(channel, MESSAGE_DELAY);
	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
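
/*
 * Typical usage sketch of the iterator (error handling elided;
 * my_handle_packet() is a placeholder).  Most drivers do not call these
 * primitives directly but use the foreach_vmbus_pkt() helper from
 * <linux/hyperv.h>, which wraps hv_pkt_iter_first()/hv_pkt_iter_next() and
 * lets hv_pkt_iter_close() signal the host once the ring has been drained:
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	foreach_vmbus_pkt(pkt, channel) {
 *		// pkt points at the bounce copy held in rbi->pkt_buffer
 *		my_handle_packet(channel, pkt);
 *	}
 */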

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;
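
	/*
	 * Worked example of the two checks above: pending_sz == 0x1000.  If
	 * the writer already had 0x1800 bytes free before this iteration
	 * (curr_write_sz - bytes_read > 0x1000), it was never blocked and no
	 * signal is sent.  If it had only 0x0800 free before but 0x1200 free
	 * now, this iteration crossed the threshold and the host is signaled
	 * below.
	 */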

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);