/*
 * STMicroelectronics STM32 CRC32 hardware driver
 *
 * Author: Fabien Dessenne <fabien.dessenne@st.com>
 */

#include <linux/bitrev.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <crypto/internal/hash.h>

#include <asm/unaligned.h>

#define DRIVER_NAME		"stm32-crc32"
#define CHKSUM_DIGEST_SIZE	4
#define CHKSUM_BLOCK_SIZE	1

/* Registers */
#define CRC_DR			0x00000000
#define CRC_CR			0x00000008
#define CRC_INIT		0x00000010
#define CRC_POL			0x00000014

/* Register values */
#define CRC_CR_RESET		BIT(0)
#define CRC_CR_REVERSE		(BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT	0xFFFFFFFF

/* Bit-reversed polynomials */
#define POLY_CRC32		0xEDB88320
#define POLY_CRC32C		0x82F63B78

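/*
 * Per-instance state for one CRC hardware block. The pending_data buffer
 * holds tail bytes of an update that do not fill a complete 32-bit word.
 */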
struct stm32_crc {
	struct list_head list;
	struct device *dev;
	void __iomem *regs;
	struct clk *clk;
	u8 pending_data[sizeof(u32)];
	size_t nb_pending_bytes;
};

struct stm32_crc_list {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct stm32_crc_list crc_list = {
	.dev_list = LIST_HEAD_INIT(crc_list.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
};

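/* Transform context: CRC initial value ("key") and polynomial */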
struct stm32_crc_ctx {
	u32 key;
	u32 poly;
};

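/* Per-request context: intermediate CRC value and the device computing it */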
struct stm32_crc_desc_ctx {
	u32 partial;
	struct stm32_crc *crc;
};

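/*
 * cra_init hooks: preset the default initial value and select the
 * polynomial (IEEE CRC32 or Castagnoli CRC32C) for each algorithm.
 */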
static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
{
	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);

	mctx->key = CRC_INIT_DEFAULT;
	mctx->poly = POLY_CRC32;
	return 0;
}

static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
{
	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);

	mctx->key = CRC_INIT_DEFAULT;
	mctx->poly = POLY_CRC32C;
	return 0;
}

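/* The optional 4-byte "key" overrides the default CRC initial value */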
static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);

	if (keylen != sizeof(u32)) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	mctx->key = get_unaligned_le32(key);
	return 0;
}

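/*
 * Pick a CRC device, program the initial value and polynomial (bit-reversed,
 * since the peripheral is configured in bit reverse mode), then reset the
 * engine and latch the starting CRC value.
 */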
static int stm32_crc_init(struct shash_desc *desc)
{
	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
	struct stm32_crc *crc;

	spin_lock_bh(&crc_list.lock);
	list_for_each_entry(crc, &crc_list.dev_list, list) {
		ctx->crc = crc;
		break;
	}
	spin_unlock_bh(&crc_list.lock);

	/* Reset, set key, poly and configure in bit reverse mode */
	writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
	writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
	writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);

	/* Store partial result */
	ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
	ctx->crc->nb_pending_bytes = 0;

	return 0;
}

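/*
 * Feed data to the peripheral: first complete any pending partial word,
 * then push full 32-bit words, and finally buffer the remaining tail bytes.
 */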
static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
			    unsigned int length)
{
	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
	struct stm32_crc *crc = ctx->crc;
	u32 *d32;
	unsigned int i;

	if (unlikely(crc->nb_pending_bytes)) {
		while (crc->nb_pending_bytes != sizeof(u32) && length) {
			/* Fill in pending data */
			crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
			length--;
		}

		if (crc->nb_pending_bytes == sizeof(u32)) {
			/* Process completed pending data */
			writel_relaxed(*(u32 *)crc->pending_data,
				       crc->regs + CRC_DR);
			crc->nb_pending_bytes = 0;
		}
	}

	d32 = (u32 *)d8;
	for (i = 0; i < length >> 2; i++)
		/* Process 32-bit data */
		writel_relaxed(*(d32++), crc->regs + CRC_DR);

	/* Store partial result */
	ctx->partial = readl_relaxed(crc->regs + CRC_DR);

	/* Check for pending data (less than 32 bits) */
	length &= 3;
	if (likely(!length))
		return 0;

	if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
		/* Shall not happen */
		dev_err(crc->dev, "Pending data overflow\n");
		return -EINVAL;
	}

	d8 = (const u8 *)d32;
	for (i = 0; i < length; i++)
		/* Store pending data */
		crc->pending_data[crc->nb_pending_bytes++] = *(d8++);

	return 0;
}

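/* Emit the little-endian digest; crc32c returns the bit-inverted value */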
static int stm32_crc_final(struct shash_desc *desc, u8 *out)
{
	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);

	/* Send computed CRC */
	put_unaligned_le32(mctx->poly == POLY_CRC32C ?
			   ~ctx->partial : ctx->partial, out);

	return 0;
}

static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
			   unsigned int length, u8 *out)
{
	return stm32_crc_update(desc, data, length) ?:
	       stm32_crc_final(desc, out);
}

static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
			    unsigned int length, u8 *out)
{
	return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
}

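/*
 * shash algorithms registered with the crypto API. Priority 200 makes the
 * hardware implementation preferred over the generic C implementations.
 */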
static struct shash_alg algs[] = {
	/* CRC-32 */
	{
		.setkey         = stm32_crc_setkey,
		.init           = stm32_crc_init,
		.update         = stm32_crc_update,
		.final          = stm32_crc_final,
		.finup          = stm32_crc_finup,
		.digest         = stm32_crc_digest,
		.descsize       = sizeof(struct stm32_crc_desc_ctx),
		.digestsize     = CHKSUM_DIGEST_SIZE,
		.base           = {
			.cra_name               = "crc32",
			.cra_driver_name        = DRIVER_NAME,
			.cra_priority           = 200,
			.cra_flags              = CRYPTO_ALG_OPTIONAL_KEY,
			.cra_blocksize          = CHKSUM_BLOCK_SIZE,
			.cra_alignmask          = 3,
			.cra_ctxsize            = sizeof(struct stm32_crc_ctx),
			.cra_module             = THIS_MODULE,
			.cra_init               = stm32_crc32_cra_init,
		}
	},
	/* CRC-32 Castagnoli */
	{
		.setkey         = stm32_crc_setkey,
		.init           = stm32_crc_init,
		.update         = stm32_crc_update,
		.final          = stm32_crc_final,
		.finup          = stm32_crc_finup,
		.digest         = stm32_crc_digest,
		.descsize       = sizeof(struct stm32_crc_desc_ctx),
		.digestsize     = CHKSUM_DIGEST_SIZE,
		.base           = {
			.cra_name               = "crc32c",
			.cra_driver_name        = DRIVER_NAME,
			.cra_priority           = 200,
			.cra_flags              = CRYPTO_ALG_OPTIONAL_KEY,
			.cra_blocksize          = CHKSUM_BLOCK_SIZE,
			.cra_alignmask          = 3,
			.cra_ctxsize            = sizeof(struct stm32_crc_ctx),
			.cra_module             = THIS_MODULE,
			.cra_init               = stm32_crc32c_cra_init,
		}
	}
};

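/*
 * Probe: map the CRC registers, enable the peripheral clock, add the
 * instance to the device list and register the shash algorithms.
 */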
static int stm32_crc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct stm32_crc *crc;
	struct resource *res;
	int ret;

	crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
	if (!crc)
		return -ENOMEM;

	crc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	crc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(crc->regs)) {
		dev_err(dev, "Cannot map CRC IO\n");
		return PTR_ERR(crc->regs);
	}

	crc->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(crc->clk)) {
		dev_err(dev, "Could not get clock\n");
		return PTR_ERR(crc->clk);
	}

	ret = clk_prepare_enable(crc->clk);
	if (ret) {
		dev_err(crc->dev, "Failed to enable clock\n");
		return ret;
	}

	platform_set_drvdata(pdev, crc);

	spin_lock(&crc_list.lock);
	list_add(&crc->list, &crc_list.dev_list);
	spin_unlock(&crc_list.lock);

	ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
	if (ret) {
		dev_err(dev, "Failed to register\n");
		/* Drop the instance again so no stale list entry remains */
		spin_lock(&crc_list.lock);
		list_del(&crc->list);
		spin_unlock(&crc_list.lock);
		clk_disable_unprepare(crc->clk);
		return ret;
	}

	dev_info(dev, "Initialized\n");

	return 0;
}

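/* Remove: unregister the algorithms, drop the instance and gate its clock */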
static int stm32_crc_remove(struct platform_device *pdev)
{
	struct stm32_crc *crc = platform_get_drvdata(pdev);

	spin_lock(&crc_list.lock);
	list_del(&crc->list);
	spin_unlock(&crc_list.lock);

	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));

	clk_disable_unprepare(crc->clk);

	return 0;
}

static const struct of_device_id stm32_dt_ids[] = {
	{ .compatible = "st,stm32f7-crc", },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_dt_ids);

static struct platform_driver stm32_crc_driver = {
	.probe  = stm32_crc_probe,
	.remove = stm32_crc_remove,
	.driver = {
		.name           = DRIVER_NAME,
		.of_match_table = stm32_dt_ids,
	},
};

module_platform_driver(stm32_crc_driver);

MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 CRC32 hardware driver");
MODULE_LICENSE("GPL");