/*
 * STM32 CRC32 hardware accelerator driver.
 *
 * Author: Fabien Dessenne <fabien.dessenne@st.com>
 * NOTE(review): original license header was garbled in extraction;
 * MODULE_LICENSE below declares GPL — restore the exact header from VCS.
 */
7#include <linux/bitrev.h>
8#include <linux/clk.h>
9#include <linux/module.h>
10#include <linux/platform_device.h>
11
12#include <crypto/internal/hash.h>
13
14#include <asm/unaligned.h>
15
#define DRIVER_NAME "stm32-crc32"
#define CHKSUM_DIGEST_SIZE 4
#define CHKSUM_BLOCK_SIZE 1

/* Register offsets within the STM32 CRC peripheral */
#define CRC_DR 0x00000000
#define CRC_CR 0x00000008
#define CRC_INIT 0x00000010
#define CRC_POL 0x00000014

/* Control register bit fields */
#define CRC_CR_RESET BIT(0)
#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT 0xFFFFFFFF

/* Bit-reversed polynomials: CRC32 (IEEE 802.3) and CRC32C (Castagnoli) */
#define POLY_CRC32 0xEDB88320
#define POLY_CRC32C 0x82F63B78
34
/* Per-device state for one CRC hardware instance */
struct stm32_crc {
	struct list_head list;		/* linkage into crc_list.dev_list */
	struct device *dev;
	void __iomem *regs;		/* mapped CRC register base */
	struct clk *clk;
	u8 pending_data[sizeof(u32)];	/* buffers a partial 32-bit word between updates */
	size_t nb_pending_bytes;	/* valid bytes in pending_data (0..3) */
};
43
/* Registry of all probed CRC devices, protected by .lock */
struct stm32_crc_list {
	struct list_head dev_list;
	spinlock_t lock;
};
48
/* Single global registry; probe adds devices, remove deletes them */
static struct stm32_crc_list crc_list = {
	.dev_list = LIST_HEAD_INIT(crc_list.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
};
53
/* Per-transform context: CRC seed ("key") and polynomial */
struct stm32_crc_ctx {
	u32 key;	/* initial CRC value, little-endian via setkey */
	u32 poly;	/* POLY_CRC32 or POLY_CRC32C */
};
58
/* Per-request context: running CRC and the device servicing the request */
struct stm32_crc_desc_ctx {
	u32 partial;		/* intermediate CRC read back from CRC_DR */
	struct stm32_crc *crc;	/* device chosen at init time */
};
63
64static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
65{
66 struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
67
68 mctx->key = CRC_INIT_DEFAULT;
69 mctx->poly = POLY_CRC32;
70 return 0;
71}
72
73static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
74{
75 struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
76
77 mctx->key = CRC_INIT_DEFAULT;
78 mctx->poly = POLY_CRC32C;
79 return 0;
80}
81
82static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
83 unsigned int keylen)
84{
85 struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
86
87 if (keylen != sizeof(u32)) {
88 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
89 return -EINVAL;
90 }
91
92 mctx->key = get_unaligned_le32(key);
93 return 0;
94}
95
96static int stm32_crc_init(struct shash_desc *desc)
97{
98 struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
99 struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
100 struct stm32_crc *crc;
101
102 spin_lock_bh(&crc_list.lock);
103 list_for_each_entry(crc, &crc_list.dev_list, list) {
104 ctx->crc = crc;
105 break;
106 }
107 spin_unlock_bh(&crc_list.lock);
108
109
110 writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
111 writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
112 writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
113
114
115 ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
116 ctx->crc->nb_pending_bytes = 0;
117
118 return 0;
119}
120
/*
 * Feed data into the CRC engine 32 bits at a time.
 * The engine only accepts full words, so a trailing 1-3 bytes are held in
 * crc->pending_data until the next update completes a word.
 */
static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
			    unsigned int length)
{
	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
	struct stm32_crc *crc = ctx->crc;
	u32 *d32;
	unsigned int i;

	if (unlikely(crc->nb_pending_bytes)) {
		while (crc->nb_pending_bytes != sizeof(u32) && length) {
			/* Fill in pending data from the leftover of last call */
			crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
			length--;
		}

		if (crc->nb_pending_bytes == sizeof(u32)) {
			/* Process completed pending word */
			writel_relaxed(*(u32 *)crc->pending_data,
				       crc->regs + CRC_DR);
			crc->nb_pending_bytes = 0;
		}
	}

	d32 = (u32 *)d8;
	for (i = 0; i < length >> 2; i++)
		/* Process data 32 bits at a time */
		writel_relaxed(*(d32++), crc->regs + CRC_DR);

	/* Capture the running CRC so final() can emit it without extra I/O */
	ctx->partial = readl_relaxed(crc->regs + CRC_DR);

	/* Remaining bytes that do not fill a 32-bit word */
	length &= 3;
	if (likely(!length))
		return 0;

	if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
		/* Shall not happen: pending is always flushed or exhausted above */
		dev_err(crc->dev, "Pending data overflow\n");
		return -EINVAL;
	}

	d8 = (const u8 *)d32;
	for (i = 0; i < length; i++)
		/* Store trailing bytes for the next update */
		crc->pending_data[crc->nb_pending_bytes++] = *(d8++);

	return 0;
}
170
171static int stm32_crc_final(struct shash_desc *desc, u8 *out)
172{
173 struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
174 struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
175
176
177 put_unaligned_le32(mctx->poly == POLY_CRC32C ?
178 ~ctx->partial : ctx->partial, out);
179
180 return 0;
181}
182
183static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
184 unsigned int length, u8 *out)
185{
186 return stm32_crc_update(desc, data, length) ?:
187 stm32_crc_final(desc, out);
188}
189
190static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
191 unsigned int length, u8 *out)
192{
193 return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
194}
195
/* Two hash algorithms registered with the crypto API, sharing all ops */
static struct shash_alg algs[] = {
	/* CRC-32 */
	{
		.setkey = stm32_crc_setkey,
		.init = stm32_crc_init,
		.update = stm32_crc_update,
		.final = stm32_crc_final,
		.finup = stm32_crc_finup,
		.digest = stm32_crc_digest,
		.descsize = sizeof(struct stm32_crc_desc_ctx),
		.digestsize = CHKSUM_DIGEST_SIZE,
		.base = {
			.cra_name = "crc32",
			.cra_driver_name = DRIVER_NAME,
			.cra_priority = 200,
			.cra_blocksize = CHKSUM_BLOCK_SIZE,
			.cra_alignmask = 3,
			.cra_ctxsize = sizeof(struct stm32_crc_ctx),
			.cra_module = THIS_MODULE,
			.cra_init = stm32_crc32_cra_init,
		}
	},
	/* CRC-32C (Castagnoli) */
	{
		.setkey = stm32_crc_setkey,
		.init = stm32_crc_init,
		.update = stm32_crc_update,
		.final = stm32_crc_final,
		.finup = stm32_crc_finup,
		.digest = stm32_crc_digest,
		.descsize = sizeof(struct stm32_crc_desc_ctx),
		.digestsize = CHKSUM_DIGEST_SIZE,
		.base = {
			.cra_name = "crc32c",
			.cra_driver_name = DRIVER_NAME,
			.cra_priority = 200,
			.cra_blocksize = CHKSUM_BLOCK_SIZE,
			.cra_alignmask = 3,
			.cra_ctxsize = sizeof(struct stm32_crc_ctx),
			.cra_module = THIS_MODULE,
			.cra_init = stm32_crc32c_cra_init,
		}
	}
};
240
241static int stm32_crc_probe(struct platform_device *pdev)
242{
243 struct device *dev = &pdev->dev;
244 struct stm32_crc *crc;
245 struct resource *res;
246 int ret;
247
248 crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
249 if (!crc)
250 return -ENOMEM;
251
252 crc->dev = dev;
253
254 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
255 crc->regs = devm_ioremap_resource(dev, res);
256 if (IS_ERR(crc->regs)) {
257 dev_err(dev, "Cannot map CRC IO\n");
258 return PTR_ERR(crc->regs);
259 }
260
261 crc->clk = devm_clk_get(dev, NULL);
262 if (IS_ERR(crc->clk)) {
263 dev_err(dev, "Could not get clock\n");
264 return PTR_ERR(crc->clk);
265 }
266
267 ret = clk_prepare_enable(crc->clk);
268 if (ret) {
269 dev_err(crc->dev, "Failed to enable clock\n");
270 return ret;
271 }
272
273 platform_set_drvdata(pdev, crc);
274
275 spin_lock(&crc_list.lock);
276 list_add(&crc->list, &crc_list.dev_list);
277 spin_unlock(&crc_list.lock);
278
279 ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
280 if (ret) {
281 dev_err(dev, "Failed to register\n");
282 clk_disable_unprepare(crc->clk);
283 return ret;
284 }
285
286 dev_info(dev, "Initialized\n");
287
288 return 0;
289}
290
/*
 * Teardown: unlist the device first so no new request can pick it,
 * then unregister the algorithms and gate the clock.
 */
static int stm32_crc_remove(struct platform_device *pdev)
{
	struct stm32_crc *crc = platform_get_drvdata(pdev);

	spin_lock(&crc_list.lock);
	list_del(&crc->list);
	spin_unlock(&crc_list.lock);

	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));

	clk_disable_unprepare(crc->clk);

	return 0;
}
305
/* Device-tree match table: STM32F7-family CRC unit */
static const struct of_device_id stm32_dt_ids[] = {
	{ .compatible = "st,stm32f7-crc", },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_dt_ids);
311
/* Platform driver glue; module_platform_driver generates init/exit */
static struct platform_driver stm32_crc_driver = {
	.probe = stm32_crc_probe,
	.remove = stm32_crc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = stm32_dt_ids,
	},
};

module_platform_driver(stm32_crc_driver);
322
323MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
324MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver");
325MODULE_LICENSE("GPL");
326