1
2
3
4
5
6
7
8
9
10#include <linux/mmc/card.h>
11#include <linux/mmc/host.h>
12#include <linux/module.h>
13
14#include "mmc_hsq.h"
15
16#define HSQ_NUM_SLOTS 64
17#define HSQ_INVALID_TAG HSQ_NUM_SLOTS
18
19static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
20{
21 struct mmc_host *mmc = hsq->mmc;
22 struct hsq_slot *slot;
23 unsigned long flags;
24
25 spin_lock_irqsave(&hsq->lock, flags);
26
27
28 if (hsq->mrq) {
29 spin_unlock_irqrestore(&hsq->lock, flags);
30 return;
31 }
32
33
34 if (!hsq->qcnt || !hsq->enabled) {
35 spin_unlock_irqrestore(&hsq->lock, flags);
36 return;
37 }
38
39 slot = &hsq->slot[hsq->next_tag];
40 hsq->mrq = slot->mrq;
41 hsq->qcnt--;
42
43 spin_unlock_irqrestore(&hsq->lock, flags);
44
45 mmc->ops->request(mmc, hsq->mrq);
46}
47
48static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
49{
50 struct hsq_slot *slot;
51 int tag;
52
53
54
55
56
57 if (!remains) {
58 hsq->next_tag = HSQ_INVALID_TAG;
59 return;
60 }
61
62
63
64
65
66 if (++hsq->next_tag != HSQ_INVALID_TAG) {
67 slot = &hsq->slot[hsq->next_tag];
68 if (slot->mrq)
69 return;
70 }
71
72
73 for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
74 slot = &hsq->slot[tag];
75 if (slot->mrq)
76 break;
77 }
78
79 if (tag == HSQ_NUM_SLOTS)
80 tag = HSQ_INVALID_TAG;
81
82 hsq->next_tag = tag;
83}
84
85static void mmc_hsq_post_request(struct mmc_hsq *hsq)
86{
87 unsigned long flags;
88 int remains;
89
90 spin_lock_irqsave(&hsq->lock, flags);
91
92 remains = hsq->qcnt;
93 hsq->mrq = NULL;
94
95
96 mmc_hsq_update_next_tag(hsq, remains);
97
98 if (hsq->waiting_for_idle && !remains) {
99 hsq->waiting_for_idle = false;
100 wake_up(&hsq->wait_queue);
101 }
102
103
104 if (hsq->recovery_halt) {
105 spin_unlock_irqrestore(&hsq->lock, flags);
106 return;
107 }
108
109 spin_unlock_irqrestore(&hsq->lock, flags);
110
111
112
113
114
115 if (remains > 0)
116 mmc_hsq_pump_requests(hsq);
117}
118
119
120
121
122
123
124
125
126
127bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
128{
129 struct mmc_hsq *hsq = mmc->cqe_private;
130 unsigned long flags;
131
132 spin_lock_irqsave(&hsq->lock, flags);
133
134 if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
135 spin_unlock_irqrestore(&hsq->lock, flags);
136 return false;
137 }
138
139
140
141
142 hsq->slot[hsq->next_tag].mrq = NULL;
143
144 spin_unlock_irqrestore(&hsq->lock, flags);
145
146 mmc_cqe_request_done(mmc, hsq->mrq);
147
148 mmc_hsq_post_request(hsq);
149
150 return true;
151}
152EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
153
154static void mmc_hsq_recovery_start(struct mmc_host *mmc)
155{
156 struct mmc_hsq *hsq = mmc->cqe_private;
157 unsigned long flags;
158
159 spin_lock_irqsave(&hsq->lock, flags);
160
161 hsq->recovery_halt = true;
162
163 spin_unlock_irqrestore(&hsq->lock, flags);
164}
165
166static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
167{
168 struct mmc_hsq *hsq = mmc->cqe_private;
169 int remains;
170
171 spin_lock_irq(&hsq->lock);
172
173 hsq->recovery_halt = false;
174 remains = hsq->qcnt;
175
176 spin_unlock_irq(&hsq->lock);
177
178
179
180
181
182 if (remains > 0)
183 mmc_hsq_pump_requests(hsq);
184}
185
186static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
187{
188 struct mmc_hsq *hsq = mmc->cqe_private;
189 int tag = mrq->tag;
190
191 spin_lock_irq(&hsq->lock);
192
193 if (!hsq->enabled) {
194 spin_unlock_irq(&hsq->lock);
195 return -ESHUTDOWN;
196 }
197
198
199 if (hsq->recovery_halt) {
200 spin_unlock_irq(&hsq->lock);
201 return -EBUSY;
202 }
203
204 hsq->slot[tag].mrq = mrq;
205
206
207
208
209
210 if (hsq->next_tag == HSQ_INVALID_TAG)
211 hsq->next_tag = tag;
212
213 hsq->qcnt++;
214
215 spin_unlock_irq(&hsq->lock);
216
217 mmc_hsq_pump_requests(hsq);
218
219 return 0;
220}
221
222static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
223{
224 if (mmc->ops->post_req)
225 mmc->ops->post_req(mmc, mrq, 0);
226}
227
228static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
229{
230 bool is_idle;
231
232 spin_lock_irq(&hsq->lock);
233
234 is_idle = (!hsq->mrq && !hsq->qcnt) ||
235 hsq->recovery_halt;
236
237 *ret = hsq->recovery_halt ? -EBUSY : 0;
238 hsq->waiting_for_idle = !is_idle;
239
240 spin_unlock_irq(&hsq->lock);
241
242 return is_idle;
243}
244
245static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
246{
247 struct mmc_hsq *hsq = mmc->cqe_private;
248 int ret;
249
250 wait_event(hsq->wait_queue,
251 mmc_hsq_queue_is_idle(hsq, &ret));
252
253 return ret;
254}
255
256static void mmc_hsq_disable(struct mmc_host *mmc)
257{
258 struct mmc_hsq *hsq = mmc->cqe_private;
259 u32 timeout = 500;
260 int ret;
261
262 spin_lock_irq(&hsq->lock);
263
264 if (!hsq->enabled) {
265 spin_unlock_irq(&hsq->lock);
266 return;
267 }
268
269 spin_unlock_irq(&hsq->lock);
270
271 ret = wait_event_timeout(hsq->wait_queue,
272 mmc_hsq_queue_is_idle(hsq, &ret),
273 msecs_to_jiffies(timeout));
274 if (ret == 0) {
275 pr_warn("could not stop mmc software queue\n");
276 return;
277 }
278
279 spin_lock_irq(&hsq->lock);
280
281 hsq->enabled = false;
282
283 spin_unlock_irq(&hsq->lock);
284}
285
286static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
287{
288 struct mmc_hsq *hsq = mmc->cqe_private;
289
290 spin_lock_irq(&hsq->lock);
291
292 if (hsq->enabled) {
293 spin_unlock_irq(&hsq->lock);
294 return -EBUSY;
295 }
296
297 hsq->enabled = true;
298
299 spin_unlock_irq(&hsq->lock);
300
301 return 0;
302}
303
304static const struct mmc_cqe_ops mmc_hsq_ops = {
305 .cqe_enable = mmc_hsq_enable,
306 .cqe_disable = mmc_hsq_disable,
307 .cqe_request = mmc_hsq_request,
308 .cqe_post_req = mmc_hsq_post_req,
309 .cqe_wait_for_idle = mmc_hsq_wait_for_idle,
310 .cqe_recovery_start = mmc_hsq_recovery_start,
311 .cqe_recovery_finish = mmc_hsq_recovery_finish,
312};
313
314int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
315{
316 hsq->num_slots = HSQ_NUM_SLOTS;
317 hsq->next_tag = HSQ_INVALID_TAG;
318
319 hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
320 sizeof(struct hsq_slot), GFP_KERNEL);
321 if (!hsq->slot)
322 return -ENOMEM;
323
324 hsq->mmc = mmc;
325 hsq->mmc->cqe_private = hsq;
326 mmc->cqe_ops = &mmc_hsq_ops;
327
328 spin_lock_init(&hsq->lock);
329 init_waitqueue_head(&hsq->wait_queue);
330
331 return 0;
332}
333EXPORT_SYMBOL_GPL(mmc_hsq_init);
334
/* Quiesce and disable the software queue ahead of host suspend. */
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
340
341int mmc_hsq_resume(struct mmc_host *mmc)
342{
343 return mmc_hsq_enable(mmc, NULL);
344}
345EXPORT_SYMBOL_GPL(mmc_hsq_resume);
346
347MODULE_DESCRIPTION("MMC Host Software Queue support");
348MODULE_LICENSE("GPL v2");
349