1 | /* |
2 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
3 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
4 | * |
5 | * This software is available to you under a choice of one of two |
6 | * licenses. You may choose to be licensed under the terms of the GNU |
7 | * General Public License (GPL) Version 2, available from the file |
8 | * COPYING in the main directory of this source tree, or the |
9 | * OpenIB.org BSD license below: |
10 | * |
11 | * Redistribution and use in source and binary forms, with or |
12 | * without modification, are permitted provided that the following |
13 | * conditions are met: |
14 | * |
15 | * - Redistributions of source code must retain the above |
16 | * copyright notice, this list of conditions and the following |
17 | * disclaimer. |
18 | * |
19 | * - Redistributions in binary form must reproduce the above |
20 | * copyright notice, this list of conditions and the following |
21 | * disclaimer in the documentation and/or other materials |
22 | * provided with the distribution. |
23 | * |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
31 | * SOFTWARE. |
32 | */ |
33 | |
34 | #ifndef MLX4_ICM_H |
35 | #define MLX4_ICM_H |
36 | |
37 | #include <linux/list.h> |
38 | #include <linux/pci.h> |
39 | #include <linux/mutex.h> |
40 | |
/*
 * Number of entries per chunk, chosen so that one struct mlx4_icm_chunk
 * fits in roughly 256 bytes: the chunk header (list head plus the two
 * int counters) is subtracted first, and the remainder is divided by
 * the size of a scatterlist entry.
 */
#define MLX4_ICM_CHUNK_LEN \
	((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
	(sizeof(struct scatterlist)))
44 | |
/* ICM pages are always 4 KB, independent of the host PAGE_SIZE. */
enum {
	MLX4_ICM_PAGE_SHIFT = 12,
	MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
};
49 | |
/* One coherent DMA buffer: CPU mapping, length, and bus address. */
struct mlx4_icm_buf {
	void *addr;		/* kernel virtual address of the buffer */
	size_t size;		/* buffer length in bytes */
	dma_addr_t dma_addr;	/* DMA (bus) address of the buffer */
};
55 | |
/*
 * A chunk of ICM memory: either an array of scatterlist entries
 * (streaming DMA) or an array of coherent buffers, selected by the
 * 'coherent' flag. Chunks are linked on an mlx4_icm's chunk_list.
 */
struct mlx4_icm_chunk {
	struct list_head list;	/* entry on mlx4_icm::chunk_list */
	int npages;		/* pages allocated in this chunk */
	int nsg;		/* entries of sg[]/buf[] in use */
	bool coherent;		/* true: use buf[]; false: use sg[] */
	union {
		struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
		struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
	};
};
66 | |
/* An ICM allocation: a list of chunks plus a reference count. */
struct mlx4_icm {
	struct list_head chunk_list;	/* list of mlx4_icm_chunk */
	int refcount;			/* references held on this ICM */
};
71 | |
/*
 * Iterator over the DMA segments of an ICM allocation; advanced with
 * mlx4_icm_first()/mlx4_icm_next() and tested with mlx4_icm_last().
 */
struct mlx4_icm_iter {
	struct mlx4_icm *icm;		/* ICM being walked */
	struct mlx4_icm_chunk *chunk;	/* current chunk, NULL when done */
	int page_idx;			/* index into the chunk's entries */
};
77 | |
struct mlx4_dev;

/* Allocate/free an ICM area of @npages pages (coherent or streaming). */
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent);
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);

/* Reference-counted mapping of single objects or ranges in an ICM table. */
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 u32 start, u32 end);
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  u32 start, u32 end);

/* Table lifetime and lookup helpers. */
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, u32 nobj, int reserved,
			int use_lowmem, int use_coherent);
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
95 | |
96 | static inline void mlx4_icm_first(struct mlx4_icm *icm, |
97 | struct mlx4_icm_iter *iter) |
98 | { |
99 | iter->icm = icm; |
100 | iter->chunk = list_empty(head: &icm->chunk_list) ? |
101 | NULL : list_entry(icm->chunk_list.next, |
102 | struct mlx4_icm_chunk, list); |
103 | iter->page_idx = 0; |
104 | } |
105 | |
106 | static inline int mlx4_icm_last(struct mlx4_icm_iter *iter) |
107 | { |
108 | return !iter->chunk; |
109 | } |
110 | |
111 | static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) |
112 | { |
113 | if (++iter->page_idx >= iter->chunk->nsg) { |
114 | if (iter->chunk->list.next == &iter->icm->chunk_list) { |
115 | iter->chunk = NULL; |
116 | return; |
117 | } |
118 | |
119 | iter->chunk = list_entry(iter->chunk->list.next, |
120 | struct mlx4_icm_chunk, list); |
121 | iter->page_idx = 0; |
122 | } |
123 | } |
124 | |
125 | static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) |
126 | { |
127 | if (iter->chunk->coherent) |
128 | return iter->chunk->buf[iter->page_idx].dma_addr; |
129 | else |
130 | return sg_dma_address(&iter->chunk->sg[iter->page_idx]); |
131 | } |
132 | |
133 | static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) |
134 | { |
135 | if (iter->chunk->coherent) |
136 | return iter->chunk->buf[iter->page_idx].size; |
137 | else |
138 | return sg_dma_len(&iter->chunk->sg[iter->page_idx]); |
139 | } |
140 | |
141 | int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); |
142 | int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); |
143 | |
144 | #endif /* MLX4_ICM_H */ |
145 | |