// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

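/*
 * Per-CPU scratch buffers used to linearize scatterlist data for the
 * underlying synchronous (scomp) algorithm, which only operates on
 * contiguous buffers.  The spinlock serializes access to a CPU's pair
 * of SCOMP_SCRATCH_SIZE src/dst buffers.
 */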
struct scomp_scratch {
	spinlock_t lock;
	void *src;
	void *dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

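/*
 * Report the "scomp" algorithm type to userspace through the
 * crypto_user netlink interface.
 */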
static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

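/* /proc/crypto entry for scomp algorithms. */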
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

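/*
 * Free the per-CPU scratch buffers.  vfree(NULL) is a no-op, so this is
 * safe to call on a partially allocated set after an allocation failure.
 */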
static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		vfree(scratch->src);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

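/*
 * Allocate one SCOMP_SCRATCH_SIZE source and one destination buffer for
 * each possible CPU, placed on the CPU's local NUMA node.
 */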
static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->src = mem;
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

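/*
 * The scratch buffers are shared by all scomp transforms: allocate them
 * for the first user and refcount later ones under scomp_lock.
 */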
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!scomp_scratch_users++)
		ret = crypto_scomp_alloc_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

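/*
 * Run a synchronous (de)compression for an acomp request.  @dir is
 * nonzero for compression, zero for decompression.
 *
 * scomp algorithms work on contiguous buffers, so the request's
 * scatterlists must be linearized first: a single-entry lowmem
 * scatterlist is used in place, anything else is bounced through this
 * CPU's scratch buffers under the scratch spinlock.
 *
 * If the caller supplied no destination (req->dst == NULL), a
 * scatterlist for the output is allocated on success; if the supplied
 * destination is too small for the result, -ENOSPC is returned.
 */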
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	struct scomp_scratch *scratch;
	void *src, *dst;
	unsigned int dlen;
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	if (req->dst && !req->dlen)
		return -EINVAL;

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	dlen = req->dlen;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);

	if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
		src = page_to_virt(sg_page(req->src)) + req->src->offset;
	} else {
		scatterwalk_map_and_copy(scratch->src, req->src, 0,
					 req->slen, 0);
		src = scratch->src;
	}

	if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
		dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
	else
		dst = scratch->dst;

	if (dir)
		ret = crypto_scomp_compress(scomp, src, req->slen,
					    dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, req->slen,
					      dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		} else if (req->dlen > dlen) {
			ret = -ENOSPC;
			goto out;
		}
		if (dst == scratch->dst) {
			scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
						 req->dlen, 1);
		} else {
			int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
			int i;
			struct page *dst_page = sg_page(req->dst);

			for (i = 0; i < nr_pages; i++)
				flush_dcache_page(dst_page + i);
		}
	}
out:
	spin_unlock(&scratch->lock);
	return ret;
}

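/* acomp entry points: both map onto the synchronous helper above. */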
static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

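/*
 * Tear down the scomp backend of an acomp transform and release the
 * scratch buffers when the last user goes away.
 */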
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

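/*
 * Set up an acomp transform whose algorithm is actually an scomp: a
 * crypto_scomp handle is created for the same algorithm and the acomp
 * entry points are wired up to the synchronous wrappers above.
 *
 * This is what makes scomp algorithms reachable through the acomp API.
 * Minimal sketch of the acomp side this adaptor ends up servicing
 * (illustrative only; error handling omitted and "lzo" is just an
 * example algorithm name):
 *
 *	struct crypto_acomp *acomp = crypto_alloc_acomp("lzo", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(acomp);
 *
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	err = crypto_acomp_compress(req);
 */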
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

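/*
 * Allocate the scomp per-request context for an acomp request.  On
 * failure the request itself is freed and NULL is returned.
 */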
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

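/* Free the scomp per-request context attached to an acomp request. */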
void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_acomp_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

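/*
 * crypto_register_scomp - register a synchronous compression algorithm
 *
 * A driver fills in a struct scomp_alg and registers it here; the
 * algorithm then becomes usable through both the scomp and acomp
 * interfaces, and the type and flags below are set on its behalf.
 * Minimal sketch (the "foo" names are hypothetical, for illustration
 * only):
 *
 *	static struct scomp_alg foo_alg = {
 *		.alloc_ctx	= foo_alloc_ctx,
 *		.free_ctx	= foo_free_ctx,
 *		.compress	= foo_compress,
 *		.decompress	= foo_decompress,
 *		.base		= {
 *			.cra_name	 = "foo",
 *			.cra_driver_name = "foo-generic",
 *			.cra_module	 = THIS_MODULE,
 *		}
 *	};
 *
 *	err = crypto_register_scomp(&foo_alg);
 */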
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

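/* Register an array of scomp algorithms, unwinding on failure. */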
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

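/* Unregister an array of scomp algorithms in reverse order. */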
void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
