1 | /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ |
2 | /* |
3 | * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. |
4 | */ |
5 | #ifndef _IBA_DEFS_H_ |
6 | #define _IBA_DEFS_H_ |
7 | |
8 | #include <linux/kernel.h> |
9 | #include <linux/bitfield.h> |
10 | #include <asm/unaligned.h> |
11 | |
12 | static inline u32 _iba_get8(const u8 *ptr) |
13 | { |
14 | return *ptr; |
15 | } |
16 | |
17 | static inline void _iba_set8(u8 *ptr, u32 mask, u32 prep_value) |
18 | { |
19 | *ptr = (*ptr & ~mask) | prep_value; |
20 | } |
21 | |
22 | static inline u16 _iba_get16(const __be16 *ptr) |
23 | { |
24 | return be16_to_cpu(*ptr); |
25 | } |
26 | |
27 | static inline void _iba_set16(__be16 *ptr, u16 mask, u16 prep_value) |
28 | { |
29 | *ptr = cpu_to_be16((be16_to_cpu(*ptr) & ~mask) | prep_value); |
30 | } |
31 | |
32 | static inline u32 _iba_get32(const __be32 *ptr) |
33 | { |
34 | return be32_to_cpu(*ptr); |
35 | } |
36 | |
37 | static inline void _iba_set32(__be32 *ptr, u32 mask, u32 prep_value) |
38 | { |
39 | *ptr = cpu_to_be32((be32_to_cpu(*ptr) & ~mask) | prep_value); |
40 | } |
41 | |
42 | static inline u64 _iba_get64(const __be64 *ptr) |
43 | { |
44 | /* |
45 | * The mads are constructed so that 32 bit and smaller are naturally |
46 | * aligned, everything larger has a max alignment of 4 bytes. |
47 | */ |
48 | return be64_to_cpu(get_unaligned(ptr)); |
49 | } |
50 | |
51 | static inline void _iba_set64(__be64 *ptr, u64 mask, u64 prep_value) |
52 | { |
53 | put_unaligned(cpu_to_be64((_iba_get64(ptr) & ~mask) | prep_value), ptr); |
54 | } |
55 | |
/*
 * Store one bit field into a structure. field_offset/field_mask/num_bits
 * come from a *_LOC field description; FIELD_PREP() shifts the value into
 * the mask position and _iba_set8/16/32/64() does the masked
 * read-modify-write at the right access width. The _ptr assignment type
 * checks that ptr really points at field_struct.
 */
#define _IBA_SET(field_struct, field_offset, field_mask, num_bits, ptr, value) \
	({                                                                     \
		field_struct *_ptr = ptr;                                      \
		_iba_set##num_bits((void *)_ptr + (field_offset), field_mask,  \
				   FIELD_PREP(field_mask, value));             \
	})
/* Public entry point; 'field' expands to the _IBA_SET() parameter list */
#define IBA_SET(field, ptr, value) _IBA_SET(field, ptr, value)
63 | |
/*
 * Return a typed pointer to the start of a multi-byte (memory) field inside
 * the structure. No byte swapping is performed; the _ptr assignment type
 * checks that ptr really points at field_struct.
 */
#define _IBA_GET_MEM_PTR(field_struct, field_offset, type, num_bits, ptr)      \
	({                                                                     \
		field_struct *_ptr = ptr;                                      \
		(type *)((void *)_ptr + (field_offset));                       \
	})
/* Public entry point; 'field' expands to the _IBA_GET_MEM_PTR() parameters */
#define IBA_GET_MEM_PTR(field, ptr) _IBA_GET_MEM_PTR(field, ptr)
70 | |
/* FIXME: A set should always set the entire field, meaning we should zero the trailing bytes */
/*
 * Copy 'bytes' of caller data into a memory field. WARN()s when the copy
 * would exceed the field width (the copy still proceeds); a NULL 'in' or a
 * zero length is a no-op. The _in_ptr assignment type checks 'in'.
 */
#define _IBA_SET_MEM(field_struct, field_offset, type, num_bits, ptr, in,      \
		     bytes)                                                    \
	({                                                                     \
		const type *_in_ptr = in;                                      \
		WARN_ON(bytes * 8 > num_bits);                                 \
		if (in && bytes)                                               \
			memcpy(_IBA_GET_MEM_PTR(field_struct, field_offset,    \
						type, num_bits, ptr),          \
			       _in_ptr, bytes);                                \
	})
/* Public entry point; 'field' expands to the _IBA_SET_MEM() parameters */
#define IBA_SET_MEM(field, ptr, in, bytes) _IBA_SET_MEM(field, ptr, in, bytes)
83 | |
/*
 * Extract one bit field: load num_bits of big endian data at field_offset,
 * convert to CPU order, then mask and right-justify the bits with
 * FIELD_GET(). The result is cast to the matching fixed-width type.
 */
#define _IBA_GET(field_struct, field_offset, field_mask, num_bits, ptr)        \
	({                                                                     \
		const field_struct *_ptr = ptr;                                \
		(u##num_bits) FIELD_GET(                                       \
			field_mask, _iba_get##num_bits((const void *)_ptr +    \
						       (field_offset)));       \
	})
/* Public entry point; 'field' expands to the _IBA_GET() parameter list */
#define IBA_GET(field, ptr) _IBA_GET(field, ptr)
92 | |
/*
 * Copy 'bytes' of a memory field out to the caller's buffer. WARN()s when
 * the copy would exceed the field width (the copy still proceeds); a NULL
 * 'out' or a zero length is a no-op. The _out_ptr assignment type checks
 * 'out'.
 */
#define _IBA_GET_MEM(field_struct, field_offset, type, num_bits, ptr, out,     \
		     bytes)                                                    \
	({                                                                     \
		type *_out_ptr = out;                                          \
		WARN_ON(bytes * 8 > num_bits);                                 \
		if (out && bytes)                                              \
			memcpy(_out_ptr,                                       \
			       _IBA_GET_MEM_PTR(field_struct, field_offset,    \
						type, num_bits, ptr),          \
			       bytes);                                         \
	})
/* Public entry point; 'field' expands to the _IBA_GET_MEM() parameters */
#define IBA_GET_MEM(field, ptr, out, bytes) _IBA_GET_MEM(field, ptr, out, bytes)
105 | |
106 | /* |
107 | * The generated list becomes the parameters to the macros, the order is: |
108 | * - struct this applies to |
 * - starting byte offset of the field within the mad
110 | * - GENMASK or GENMASK_ULL in CPU order |
111 | * - The width of data the mask operations should work on, in bits |
112 | */ |
113 | |
114 | /* |
115 | * Extraction using a tabular description like table 106. bit_offset is from |
116 | * the Byte[Bit] notation. |
117 | */ |
118 | #define IBA_FIELD_BLOC(field_struct, byte_offset, bit_offset, num_bits) \ |
119 | field_struct, byte_offset, \ |
120 | GENMASK(7 - (bit_offset), 7 - (bit_offset) - (num_bits - 1)), \ |
121 | 8 |
122 | #define IBA_FIELD8_LOC(field_struct, byte_offset, num_bits) \ |
123 | IBA_FIELD_BLOC(field_struct, byte_offset, 0, num_bits) |
124 | |
/*
 * 16 bit field: round byte_offset down to a 2 byte boundary and build a
 * mask within that big endian word, starting at the MSB of the original
 * byte. The field must fit inside the word (GENMASK fails to build if the
 * low bit index would go negative).
 */
#define IBA_FIELD16_LOC(field_struct, byte_offset, num_bits)                   \
	field_struct, (byte_offset)&0xFFFE,                                    \
		GENMASK(15 - (((byte_offset) % 2) * 8),                        \
			15 - (((byte_offset) % 2) * 8) - (num_bits - 1)),      \
		16
130 | |
/*
 * 32 bit field: round byte_offset down to a 4 byte boundary and build a
 * mask within that big endian word, starting at the MSB of the original
 * byte. The field must fit inside the word (GENMASK fails to build if the
 * low bit index would go negative).
 */
#define IBA_FIELD32_LOC(field_struct, byte_offset, num_bits)                   \
	field_struct, (byte_offset)&0xFFFC,                                    \
		GENMASK(31 - (((byte_offset) % 4) * 8),                        \
			31 - (((byte_offset) % 4) * 8) - (num_bits - 1)),      \
		32
136 | |
/* 64 bit fields always occupy the whole quantity, so the mask is all ones */
#define IBA_FIELD64_LOC(field_struct, byte_offset)                             \
	field_struct, byte_offset, GENMASK_ULL(63, 0), 64
/*
 * In IBTA spec, everything that is more than 64bits is multiple
 * of bytes without leftover bits.
 */
/* Memory (byte array) field, accessed via the *_MEM/*_MEM_PTR macros */
#define IBA_FIELD_MLOC(field_struct, byte_offset, num_bits, type)              \
	field_struct, byte_offset, type, num_bits
145 | |
146 | #endif /* _IBA_DEFS_H_ */ |
147 | |