/*
 * linux/drivers/mmc/core/host.c
 *
 * MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "core.h"
#include "host.h"

#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)

static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	kfree(host);
}

static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
};

int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

#ifdef CONFIG_MMC_CLKGATE
static ssize_t clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
}

static ssize_t clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clkgate_delay = value;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return count;
}
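
/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */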
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			 "this means the clock is already disabled.\n",
			 mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}
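
/*
 * Internal work. Work to disable the clock at some later point.
 */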
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					      clk_gate_work.work);

	mmc_host_clk_gate_delayed(host);
}
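
/**
 *	mmc_host_clk_hold - ungate hardware MCI clocks
 *	@host: host to ungate.
 *
 *	Makes sure the host ios.clock is restored to a non-zero value
 *	past this call. Increase clock reference count and ungate clock
 *	if we're the first user.
 */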
void mmc_host_clk_hold(struct mmc_host *host)
{
	unsigned long flags;

	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
	cancel_delayed_work_sync(&host->clk_gate_work);
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}
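
/**
 *	mmc_host_may_gate_card - check if this card may be gated
 *	@card: card to check.
 */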
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;
	/*
	 * Don't gate SDIO cards! These need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may
	 * still be using it.
	 */
	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}
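
/**
 *	mmc_host_clk_release - gate off hardware MCI clocks
 *	@host: host to gate.
 *
 *	Calls the host driver with ios.clock set to zero as often as possible
 *	in order to gate off hardware MCI clocks. Decrease clock reference
 *	count and schedule disabling of clock.
 */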
void mmc_host_clk_release(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
				   msecs_to_jiffies(host->clkgate_delay));
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
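
/**
 *	mmc_host_clk_rate - get current clock frequency setting
 *	@host: host to get the clock frequency for.
 *
 *	Returns current clock frequency regardless of gating.
 */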
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long freq;
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated)
		freq = host->clk_old;
	else
		freq = host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return freq;
}
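
/**
 *	mmc_host_clk_init - set up clock gating code
 *	@host: host with potential clock to control
 */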
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	/*
	 * Default clock gating delay is 0ms to avoid wasting power.
	 * This value can be tuned by writing into sysfs entry.
	 */
	host->clkgate_delay = 0;
	host->clk_gated = false;
	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}
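
/**
 *	mmc_host_clk_exit - shut down clock gating code
 *	@host: host with potential clock to control
 */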
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_delayed_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_hold(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
	host->clkgate_delay_attr.show = clkgate_delay_show;
	host->clkgate_delay_attr.store = clkgate_delay_store;
	sysfs_attr_init(&host->clkgate_delay_attr.attr);
	host->clkgate_delay_attr.attr.name = "clkgate_delay";
	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
				mmc_hostname(host));
}
#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
}

#endif
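
/**
 *	mmc_alloc_host - initialise the per-host structure.
 *	@extra: sizeof private data structure
 *	@dev: pointer to host device model structure
 *
 *	Initialise the per-host structure.
 */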
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
		return NULL;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	spin_lock(&mmc_host_lock);
	err = idr_get_new(&mmc_host_idr, host, &host->index);
	spin_unlock(&mmc_host_lock);
	if (err)
		goto free;

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

	mmc_host_clk_init(host);

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
	INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
#ifdef CONFIG_PM
	host->pm_notify.notifier_call = mmc_pm_notify;
#endif

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_CACHE_SIZE;

	host->max_req_size = PAGE_CACHE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_CACHE_SIZE / 512;

	return host;

free:
	kfree(host);
	return NULL;
}

EXPORT_SYMBOL(mmc_alloc_host);
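
/**
 *	mmc_add_host - initialise host hardware
 *	@host: mmc host
 *
 *	Register the host with the driver model. The host must be
 *	prepared to start servicing requests before this function
 *	completes.
 */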
int mmc_add_host(struct mmc_host *host)
{
	int err;

	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	err = device_add(&host->class_dev);
	if (err)
		return err;

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
	mmc_host_clk_sysfs_init(host);

	mmc_start_host(host);
	register_pm_notifier(&host->pm_notify);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);
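
/**
 *	mmc_remove_host - remove host hardware
 *	@host: mmc host
 *
 *	Unregister and remove all cards associated with this host,
 *	and power down the MMC bus. No new requests will be issued
 *	after this function has returned.
 */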
void mmc_remove_host(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);
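
/**
 *	mmc_free_host - free the host structure
 *	@host: mmc host
 *
 *	Free the host once all references to it have been dropped.
 */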
void mmc_free_host(struct mmc_host *host)
{
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);

	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);