/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}
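
/*
 * A minimal sketch of how the two branches above are reached: a driver
 * that backs its framebuffer with vmalloc() (hypothetical probe code)
 * hits the vmalloc_to_page() path, e.g.
 *
 *	info->screen_base = (char __force __iomem *)vmalloc(info->fix.smem_len);
 *
 * whereas a physically contiguous framebuffer sets info->fix.smem_start
 * to the buffer's physical address and takes the pfn_to_page() path.
 */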

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								 unsigned long offset,
								 struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct list_head *pos = &fbdefio->pagereflist;
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref, *cur;

	if (WARN_ON_ONCE(pgoff >= info->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &info->pagerefs[pgoff];

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}
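
/*
 * Usage hint on sort_pagereflist: drivers whose flush path benefits
 * from pages arriving in ascending offset order can set
 * fbdefio->sort_pagereflist before calling fb_deferred_io_init();
 * given the O(n^2) insertion cost above, most drivers are better
 * served leaving it unset.
 */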

static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}

/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vmf->vma->vm_file)
		page->mapping = vmf->vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff; /* for page_mkclean() */

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	flush_delayed_work(&info->deferred_work);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
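
/*
 * From userspace, an fsync() on the framebuffer device forces pending
 * deferred I/O to complete; a hedged sketch (device path and length
 * are illustrative):
 *
 *	int fd = open("/dev/fb0", O_RDWR);
 *	void *fb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	memset(fb, 0xff, len);	// dirty some pages via the mapping
 *	fsync(fd);		// blocks until the deferred work has run
 */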

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
					    struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio->lock);
	return ret;
}

/*
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to
 * write to the page. We schedule a workqueue. That workqueue
 * will eventually mkclean the touched pages and execute the
 * deferred framebuffer IO. Then if userspace touches a page
 * again, we repeat the same scheme.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
	unsigned long offset = vmf->address - vmf->vma->vm_start;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(info, offset, page);
}

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_info *info = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(info, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault = fb_deferred_io_fault,
	.page_mkwrite = fb_deferred_io_mkwrite,
};

static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio = noop_dirty_folio,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	vma->vm_ops = &fb_deferred_io_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	if (!(info->flags & FBINFO_VIRTFB))
		vm_flags_set(vma, VM_IO);
	vma->vm_private_data = info;
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
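
/*
 * Usage sketch: a driver enables deferred I/O mmap semantics by routing
 * its fb_ops.fb_mmap through this helper (mydrv names are hypothetical):
 *
 *	static const struct fb_ops mydrv_fb_ops = {
 *		.owner   = THIS_MODULE,
 *		.fb_mmap = fb_deferred_io_mmap,
 *		// ... remaining fb_ops callbacks ...
 *	};
 */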

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
		struct page *cur = pageref->page;
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio->lock);
}
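
/*
 * The deferred_io callback invoked above receives the dirty pageref
 * list; an illustrative (hypothetical) driver implementation:
 *
 *	static void mydrv_deferred_io(struct fb_info *info,
 *				      struct list_head *pagereflist)
 *	{
 *		struct fb_deferred_io_pageref *pageref;
 *
 *		list_for_each_entry(pageref, pagereflist, list)
 *			mydrv_flush_page(info, pageref->offset);
 *	}
 */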

int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs, i;
	int ret;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	mutex_init(&fbdefio->lock);
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagereflist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs) {
		ret = -ENOMEM;
		goto err;
	}
	for (i = 0; i < npagerefs; ++i)
		INIT_LIST_HEAD(&pagerefs[i].list);
	info->npagerefs = npagerefs;
	info->pagerefs = pagerefs;

	return 0;

err:
	mutex_destroy(&fbdefio->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
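
/*
 * Setup sketch, assuming the hypothetical mydrv_deferred_io() above:
 *
 *	static struct fb_deferred_io mydrv_defio = {
 *		.delay       = HZ / 20,	// flush dirty pages every 50 ms
 *		.deferred_io = mydrv_deferred_io,
 *	};
 *
 *	info->fbdefio = &mydrv_defio;
 *	ret = fb_deferred_io_init(info);
 *	if (ret)
 *		return ret;
 */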

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	file->f_mapping->a_ops = &fb_deferred_io_aops;
	fbdefio->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

static void fb_deferred_io_lastclose(struct fb_info *info)
{
	struct page *page;
	int i;

	flush_delayed_work(&info->deferred_work);

	/* clear out the mapping that we setup */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}
}

void fb_deferred_io_release(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	if (!--fbdefio->open_count)
		fb_deferred_io_lastclose(info);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	fb_deferred_io_lastclose(info);

	kvfree(info->pagerefs);
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);