path: root/arch/x86/include/asm/crypto/glue_helper.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared glue code for 128-bit block ciphers
 */

#ifndef _CRYPTO_GLUE_HELPER_H
#define _CRYPTO_GLUE_HELPER_H

#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <asm/fpu/api.h>
#include <crypto/b128ops.h>

typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
				       le128 *iv);
typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src,
				       le128 *iv);

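/*
 * The cipher-specific routines take their own context types, so they are
 * cast to the generic prototypes above before being stored in a dispatch
 * table.
 */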
#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn))

struct common_glue_func_entry {
	unsigned int num_blocks; /* number of blocks that @fn will process */
	union {
		common_glue_func_t ecb;
		common_glue_cbc_func_t cbc;
		common_glue_ctr_func_t ctr;
		common_glue_xts_func_t xts;
	} fn_u;
};

struct common_glue_ctx {
	unsigned int num_funcs;
	int fpu_blocks_limit; /* -1 means the FPU is not needed at all */

	/*
	 * The first funcs entry must have the largest num_blocks and the last
	 * funcs entry must have num_blocks == 1!
	 */
	struct common_glue_func_entry funcs[];
};
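
/*
 * Example (an illustrative sketch, not taken from an in-tree driver): a
 * cipher with an 8-way parallel ECB routine would lay out its dispatch
 * table with the widest routine first and a num_blocks == 1 fallback last:
 *
 *	static const struct common_glue_ctx mycipher_ecb_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = 8,
 *
 *		.funcs = { {
 *			.num_blocks = 8,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_ecb_enc_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_encrypt) }
 *		} }
 *	};
 *
 * mycipher_ecb_enc_8way and mycipher_encrypt are hypothetical routines;
 * the serpent-avx and twofish-avx glue code follow this pattern.
 */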

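/*
 * Returns the new FPU state for the caller to track: true if
 * kernel_fpu_begin() is in effect (from this call or an earlier chunk) and
 * glue_fpu_end() must eventually be called, false if the chunk can be
 * handled without the FPU.
 */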
static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
				  struct skcipher_walk *walk,
				  bool fpu_enabled, unsigned int nbytes)
{
	if (likely(fpu_blocks_limit < 0))
		return false;

	if (fpu_enabled)
		return true;

	/*
	 * Vector registers are only used when the chunk to be processed is
	 * large enough, so do not enable the FPU until it is necessary.
	 */
	if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
		return false;

	/* prevent sleeping while the FPU is in use */
	skcipher_walk_atomise(walk);

	kernel_fpu_begin();
	return true;
}

static inline void glue_fpu_end(bool fpu_enabled)
{
	if (fpu_enabled)
		kernel_fpu_end();
}
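
/*
 * A minimal usage sketch, assuming the usual skcipher walk loop used by
 * the glue_*_req_128bit() helpers: the FPU state is threaded through each
 * chunk and released once after the loop.
 *
 *	struct skcipher_walk walk;
 *	bool fpu_enabled = false;
 *	unsigned int nbytes;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *
 *	while ((nbytes = walk.nbytes)) {
 *		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
 *					     &walk, fpu_enabled, nbytes);
 *		... process walk.src.virt.addr into walk.dst.virt.addr ...
 *		err = skcipher_walk_done(&walk, nbytes);
 *	}
 *
 *	glue_fpu_end(fpu_enabled);
 *	return err;
 *
 * bsize and gctx stand in for the driver's block size and dispatch table;
 * a real loop subtracts the bytes it processed before calling
 * skcipher_walk_done().
 */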

static inline void le128_to_be128(be128 *dst, const le128 *src)
{
	dst->a = cpu_to_be64(le64_to_cpu(src->a));
	dst->b = cpu_to_be64(le64_to_cpu(src->b));
}

static inline void be128_to_le128(le128 *dst, const be128 *src)
{
	dst->a = cpu_to_le64(be64_to_cpu(src->a));
	dst->b = cpu_to_le64(be64_to_cpu(src->b));
}

static inline void le128_inc(le128 *i)
{
	u64 a = le64_to_cpu(i->a);
	u64 b = le64_to_cpu(i->b);

	b++;
	if (!b)
		a++;

	i->a = cpu_to_le64(a);
	i->b = cpu_to_le64(b);
}
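
/*
 * In CTR mode the counter is kept as a le128 and bumped once per 128-bit
 * block. A single-block CTR routine typically looks like this (a sketch;
 * mycipher_encrypt is a hypothetical one-block cipher function):
 *
 *	static void mycipher_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
 *				       le128 *iv)
 *	{
 *		be128 ctrblk;
 *
 *		le128_to_be128(&ctrblk, iv);
 *		le128_inc(iv);
 *
 *		mycipher_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
 *		u128_xor(dst, src, (u128 *)&ctrblk);
 *	}
 */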

extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
			       struct skcipher_request *req);

extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
				       struct skcipher_request *req);

extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
				       struct skcipher_request *req);

extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
			       struct skcipher_request *req);

extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			       struct skcipher_request *req,
			       common_glue_func_t tweak_fn, void *tweak_ctx,
			       void *crypt_ctx);

extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
				      le128 *iv, common_glue_func_t fn);
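
/*
 * Drivers typically wrap these helpers directly in their skcipher
 * callbacks, e.g. (a sketch; mycipher_ecb_enc is the hypothetical dispatch
 * table from the example above):
 *
 *	static int ecb_encrypt(struct skcipher_request *req)
 *	{
 *		return glue_ecb_req_128bit(&mycipher_ecb_enc, req);
 *	}
 */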

#endif /* _CRYPTO_GLUE_HELPER_H */