/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>

struct cma;
struct iommu_ops;

/*
 * Values for struct dma_map_ops.flags:
 *
 * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
 * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
 */
#define DMA_F_PCI_P2PDMA_SUPPORTED	(1 << 0)
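
/*
 * Illustrative sketch (not part of this header): an implementation whose
 * scatterlist paths can handle PCI P2PDMA pages advertises that through
 * ->flags.  The ops table and the my_*() callbacks are hypothetical.
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.flags		= DMA_F_PCI_P2PDMA_SUPPORTED,
 *		.map_sg		= my_map_sg,
 *		.unmap_sg	= my_unmap_sg,
 *	};
 */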

struct dma_map_ops {
	unsigned int flags;

	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
			enum dma_data_direction dir, gfp_t gfp,
			unsigned long attrs);
	void (*free_noncontiguous)(struct device *dev, size_t size,
			struct sg_table *sgt, enum dma_data_direction dir);
	int (*mmap)(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
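
/*
 * Illustrative sketch (not part of this header): skeleton of a ->map_sg
 * implementation honouring the error convention noted above, i.e. returning
 * the number of mapped entries on success and a negative errno on failure.
 * my_map_one() is a hypothetical helper.
 *
 *	static int my_map_sg(struct device *dev, struct scatterlist *sgl,
 *			int nents, enum dma_data_direction dir,
 *			unsigned long attrs)
 *	{
 *		struct scatterlist *sg;
 *		int i;
 *
 *		for_each_sg(sgl, sg, nents, i) {
 *			sg->dma_address = my_map_one(dev, sg_phys(sg),
 *						     sg->length, dir, attrs);
 *			if (sg->dma_address == DMA_MAPPING_ERROR)
 *				return -EIO;
 *			sg_dma_len(sg) = sg->length;
 *		}
 *		return nents;
 *	}
 */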

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
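
/*
 * Illustrative sketch (not part of this header): a bus or IOMMU layer
 * usually installs its ops from its device setup path, and the DMA API core
 * then resolves them through get_dma_ops() on every mapping call.
 * my_bus_dma_ops and my_bus_setup_device() are hypothetical.
 *
 *	static void my_bus_setup_device(struct device *dev)
 *	{
 *		set_dma_ops(dev, &my_bus_dma_ops);
 *	}
 */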

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
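
/*
 * Illustrative sketch (not part of this header): how an ->alloc backend
 * might try CMA first and fall back to the page allocator, mirroring what
 * dma_alloc_contiguous() reduces to when CONFIG_DMA_CMA is disabled.
 * my_alloc_backing() is a hypothetical helper.
 *
 *	static struct page *my_alloc_backing(struct device *dev, size_t size,
 *			gfp_t gfp)
 *	{
 *		struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *		if (!page)
 *			page = alloc_pages(gfp, get_order(size));
 *		return page;
 *	}
 */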

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */
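
/*
 * Illustrative sketch (not part of this header): a platform driver with a
 * dedicated on-chip memory region can hand it to the DMA core from its
 * probe routine.  The physical address, device address and size below are
 * hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		return dma_declare_coherent_memory(&pdev->dev, 0x90000000,
 *						   0x90000000, SZ_64K);
 *	}
 */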

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
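
/*
 * Illustrative sketch (not part of this header): an architecture without
 * hardware coherence can seed the global pool from a reserved region during
 * early init.  The base address and size below are hypothetical.
 *
 *	static int __init my_arch_init_dma_pool(void)
 *	{
 *		return dma_init_global_coherent(0x80000000, SZ_1M);
 *	}
 */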

/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * Users of the DMA API should only care about the sg_table, but to make the
 * DMA-API internal vmapping and freeing easier we stash away the page array
 * as well (except for the fallback case). This can go away any time, e.g.
 * when a vmap-variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)
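
/*
 * Illustrative sketch (not part of this header): a ->free_noncontiguous
 * implementation can recover the stashed page array via sgt_handle() before
 * tearing the allocation down.  my_put_pages() is a hypothetical helper.
 *
 *	static void my_free_noncontiguous(struct device *dev, size_t size,
 *			struct sg_table *sgt, enum dma_data_direction dir)
 *	{
 *		struct dma_sgt_handle *sh = sgt_handle(sgt);
 *
 *		my_put_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 *		sg_free_table(&sh->sgt);
 *		kfree(sh);
 *	}
 */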

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);
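
/*
 * Illustrative sketch (not part of this header): implementations with no
 * special mmap/get_sgtable/page-allocation requirements can simply delegate
 * to the dma_common_*() helpers from their ops table.  my_dma_ops is
 * hypothetical.
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.mmap		= dma_common_mmap,
 *		.get_sgtable	= dma_common_get_sgtable,
 *		.alloc_pages	= dma_common_alloc_pages,
 *		.free_pages	= dma_common_free_pages,
 *	};
 */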

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);
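
/*
 * Illustrative sketch (not part of this header): the phys_addr_ok callback
 * lets the caller reject atomic-pool memory the device cannot reach, for
 * example anything above its DMA mask.  my_addr_ok() is hypothetical and
 * assumes a one-to-one physical-to-DMA address mapping.
 *
 *	static bool my_addr_ok(struct device *dev, phys_addr_t phys,
 *			size_t size)
 *	{
 *		return phys + size - 1 <= dma_get_mask(dev);
 *	}
 */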

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);
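
/*
 * Illustrative sketch (not part of this header): a bus whose devices see
 * system RAM at a fixed offset can register that translation for dma-direct.
 * The example assumes RAM at CPU address 0x40000000 appearing at bus address
 * 0x0 with a 1 GiB window; all values are hypothetical.
 *
 *	static int my_bus_setup(struct device *dev)
 *	{
 *		return dma_direct_set_offset(dev, 0x40000000, 0x0, SZ_1G);
 *	}
 */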

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

/*
 * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
 */
static inline bool dma_kmalloc_safe(struct device *dev,
				    enum dma_data_direction dir)
{
	/*
	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
	 * caches have already been aligned to a DMA-safe size.
	 */
	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		return true;

	/*
	 * kmalloc() buffers are DMA-safe irrespective of size if the device
	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
	 * cache maintenance and benign cache line evictions).
	 */
	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
		return true;

	return false;
}

/*
 * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
 * sufficiently aligned for non-coherent DMA.
 */
static inline bool dma_kmalloc_size_aligned(size_t size)
{
	/*
	 * Larger kmalloc() sizes are guaranteed to be aligned to
	 * ARCH_DMA_MINALIGN.
	 */
	if (size >= 2 * ARCH_DMA_MINALIGN ||
	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
		return true;

	return false;
}

/*
 * Check whether the given object size may have originated from a kmalloc()
 * buffer with a slab alignment below the DMA-safe alignment and needs
 * bouncing for non-coherent DMA. The pointer alignment is not considered and
 * in-structure DMA-safe offsets are the responsibility of the caller. Such
 * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
 *
 * The heuristics can have false positives, bouncing unnecessarily, though the
 * buffers would be small. False negatives are theoretically possible if, for
 * example, multiple small kmalloc() buffers are coalesced into a larger
 * buffer that passes the alignment check. There are no such known constructs
 * in the kernel.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
					    enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}
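
/*
 * Illustrative sketch (not part of this header): a non-coherent streaming
 * map path can use the helper above to decide whether a small kmalloc()ed
 * buffer has to be bounced (e.g. through swiotlb) instead of being mapped
 * in place.  my_map_bounced() and my_map_direct() are hypothetical.
 *
 *	static dma_addr_t my_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		if (dma_kmalloc_needs_bounce(dev, size, dir))
 *			return my_map_bounced(dev, page, offset, size, dir);
 *		return my_map_direct(dev, page, offset, size, dir);
 *	}
 */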

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask) do { } while (0)
#endif

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached, which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
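
/*
 * Illustrative sketch (not part of this header): a non-coherent ->alloc
 * backend that needs an uncached kernel mapping of its backing pages can
 * combine dma_pgprot() with the remap helpers declared earlier; "page" and
 * "size" are assumed to come from the caller.
 *
 *	void *vaddr = dma_common_contiguous_remap(page, size,
 *			dma_pgprot(dev, PAGE_KERNEL, attrs),
 *			__builtin_return_address(0));
 */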

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
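
/*
 * Illustrative sketch (not part of this header): a typical non-coherent
 * ->sync_single_for_device implementation translates the handle back to a
 * physical address and performs the CPU cache maintenance via the arch hook
 * above.  my_dma_to_phys() is a hypothetical translation helper.
 *
 *	static void my_sync_single_for_device(struct device *dev,
 *			dma_addr_t dma_handle, size_t size,
 *			enum dma_data_direction dir)
 *	{
 *		if (!dev_is_dma_coherent(dev))
 *			arch_sync_dma_for_device(my_dma_to_phys(dev, dma_handle),
 *						 size, dir);
 *	}
 */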

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(const struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(const struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

enum pci_p2pdma_map_type {
	/*
	 * PCI_P2PDMA_MAP_UNKNOWN: Used internally to indicate that the
	 * mapping type has not been calculated yet. Functions that return
	 * this enum never return this value to their callers.
	 */
	PCI_P2PDMA_MAP_UNKNOWN = 0,

	/*
	 * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
	 * traverse the host bridge and the host bridge is not in the
	 * allowlist. DMA mapping routines should return an error when
	 * this is returned.
	 */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,

	/*
	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
	 * each other directly through a PCI switch and the transaction will
	 * not traverse the host bridge. Such a mapping should program
	 * the DMA engine with PCI bus addresses.
	 */
	PCI_P2PDMA_MAP_BUS_ADDR,

	/*
	 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
	 * to each other, but the transaction traverses a host bridge on the
	 * allowlist. In this case, a normal mapping either with CPU physical
	 * addresses (in the case of dma-direct) or IOVA addresses (in the
	 * case of IOMMUs) should be used to program the DMA engine.
	 */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
	struct dev_pagemap *pgmap;
	int map;
	u64 bus_off;
};

#ifdef CONFIG_PCI_P2PDMA
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg)
{
	return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
#endif /* CONFIG_PCI_P2PDMA */
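
/*
 * Illustrative sketch (not part of this header): how a ->map_sg
 * implementation that sets DMA_F_PCI_P2PDMA_SUPPORTED might dispatch on the
 * returned mapping type.  The my_map_one() helper and the surrounding error
 * handling are hypothetical; bus-address segments are assumed to be filled
 * in by pci_p2pdma_map_segment() itself.
 *
 *	struct pci_p2pdma_map_state p2pdma_state = {};
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(sgl, sg, nents, i) {
 *		if (is_pci_p2pdma_page(sg_page(sg))) {
 *			switch (pci_p2pdma_map_segment(&p2pdma_state, dev, sg)) {
 *			case PCI_P2PDMA_MAP_BUS_ADDR:
 *				continue;
 *			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 *				break;
 *			default:
 *				return -EREMOTEIO;
 *			}
 *		}
 *		sg->dma_address = my_map_one(dev, sg);
 *	}
 */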

#endif /* _LINUX_DMA_MAP_OPS_H */