#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static atomic_t dev_count;
static struct uld_ctx *ctx_rr;	/* round-robin cursor over uld_ctx_list */

typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

/* CPL message handlers, indexed by CPL opcode */
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};

struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select the
	 * device in a round-robin fashion for crypto operations. Note
	 * that a session must keep using the same device to maintain
	 * request-response ordering.
	 */
	mutex_lock(&dev_mutex);
	if (!list_empty(&uld_ctx_list)) {
		u_ctx = ctx_rr;
		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
			ctx_rr = list_first_entry(&uld_ctx_list,
						  struct uld_ctx,
						  entry);
		else
			ctx_rr = list_next_entry(ctx_rr, entry);
	}
	mutex_unlock(&dev_mutex);
	return u_ctx;
}
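
/*
 * Allocate the per-adapter chcr_dev and publish the context on
 * uld_ctx_list so that assign_chcr_device() can hand it out.
 */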
static int chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock_chcr_dev);
	u_ctx->dev = dev;
	dev->u_ctx = u_ctx;
	atomic_inc(&dev_count);
	mutex_lock(&dev_mutex);
	list_add_tail(&u_ctx->entry, &uld_ctx_list);
	if (!ctx_rr)
		ctx_rr = u_ctx;
	mutex_unlock(&dev_mutex);
	return 0;
}
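
/* Unlink a context from the round-robin list; the caller holds dev_mutex. */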
static int chcr_dev_remove(struct uld_ctx *u_ctx)
{
	/* Advance the round-robin cursor past the departing context */
	if (ctx_rr == u_ctx) {
		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
			ctx_rr = list_first_entry(&uld_ctx_list,
						  struct uld_ctx,
						  entry);
		else
			ctx_rr = list_next_entry(ctx_rr, entry);
	}
	list_del(&u_ctx->entry);
	if (list_empty(&uld_ctx_list))
		ctx_rr = NULL;
	kfree(u_ctx->dev);
	u_ctx->dev = NULL;
	atomic_dec(&dev_count);
	return 0;
}
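
/*
 * Completion handler for CPL_FW6_PLD messages: recover the original
 * crypto_async_request pointer from the CPL payload, translate the
 * hardware MAC/pad error bits into an errno, and complete the request.
 */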
static int cpl_fw6_pld_handler(struct chcr_dev *dev,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;
	struct adapter *adap = padap(dev);

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						    fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (ack_err_status) {
		if (CHK_MAC_ERR_BIT(ack_err_status) ||
		    CHK_PAD_ERR_BIT(ack_err_status))
			error_status = -EBADMSG;
		atomic_inc(&adap->chcr_stats.error);
	}

	/* Call the completion callback with the failure status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	return 0;
}
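
/* Hand a fully built crypto work request to cxgb4 for transmission */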
int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}
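
/*
 * ULD "add" callback, invoked by cxgb4 once per adapter. Refuse
 * adapters whose firmware lacks crypto lookaside support; otherwise
 * allocate a context and snapshot the lower-layer driver info.
 */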
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the context and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (lld->ulp_crypto & ULP_CRYPTO_IPSEC_INLINE)
		chcr_add_xfrmops(lld);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
	return u_ctx;
}
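
/*
 * ULD receive handler: only CPL_FW6_PLD responses are expected. The
 * payload lives either inline after the RSS header (rsp) or in the
 * packet gather list (pgl).
 */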
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = u_ctx->dev;
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (rpl->opcode != CPL_FW6_PLD) {
		pr_err("Unsupported opcode\n");
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](dev, pgl->va);
	return 0;
}

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	return chcr_ipsec_xmit(skb, dev);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
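
/*
 * React to adapter state transitions: create the chcr device when the
 * adapter comes up and tear it down on detach. start_crypto() and
 * stop_crypto() bring the crypto service up when the first device
 * appears and down when the last one goes away.
 */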
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (!u_ctx->dev) {
			ret = chcr_dev_add(u_ctx);
			if (ret != 0)
				return ret;
		}
		if (atomic_read(&dev_count) == 1)
			ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		if (u_ctx->dev) {
			mutex_lock(&dev_mutex);
			chcr_dev_remove(u_ctx);
			mutex_unlock(&dev_mutex);
		}
		if (!atomic_read(&dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}
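
/*
 * Module init: registration can fail when the cxgb4 firmware offers no
 * crypto support; that is not fatal, the module simply stays idle.
 */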
static int __init chcr_crypto_init(void)
{
	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");

	return 0;
}
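
/* Module exit: stop the crypto service and free every adapter context */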
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;

	if (atomic_read(&dev_count))
		stop_crypto();

	/* Remove all devices from the list */
	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
		if (u_ctx->dev)
			chcr_dev_remove(u_ctx);
		kfree(u_ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);