/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/mmc/host.h
 *
 *  Host driver specific definitions.
 */
#ifndef LINUX_MMC_HOST_H
#define LINUX_MMC_HOST_H

#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
#include <linux/debugfs.h>

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
#include <linux/blk-crypto-profile.h>
#include <linux/android_kabi.h>
struct mmc_ios {
	unsigned int	clock;			/* clock rate */
	unsigned short	vdd;
	unsigned int	power_delay_ms;		/* waiting for stable power */

/* vdd stores the bit number of the selected voltage range from below. */

	unsigned char	bus_mode;		/* command output mode */

#define MMC_BUSMODE_OPENDRAIN	1
#define MMC_BUSMODE_PUSHPULL	2

	unsigned char	chip_select;		/* SPI chip select */

#define MMC_CS_DONTCARE		0
#define MMC_CS_HIGH		1
#define MMC_CS_LOW		2

	unsigned char	power_mode;		/* power supply mode */

#define MMC_POWER_OFF		0
#define MMC_POWER_UP		1
#define MMC_POWER_ON		2
#define MMC_POWER_UNDEFINED	3

	unsigned char	bus_width;		/* data bus width */

#define MMC_BUS_WIDTH_1		0
#define MMC_BUS_WIDTH_4		2
#define MMC_BUS_WIDTH_8		3

	unsigned char	timing;			/* timing specification used */

#define MMC_TIMING_LEGACY	0
#define MMC_TIMING_MMC_HS	1
#define MMC_TIMING_SD_HS	2
#define MMC_TIMING_UHS_SDR12	3
#define MMC_TIMING_UHS_SDR25	4
#define MMC_TIMING_UHS_SDR50	5
#define MMC_TIMING_UHS_SDR104	6
#define MMC_TIMING_UHS_DDR50	7
#define MMC_TIMING_MMC_DDR52	8
#define MMC_TIMING_MMC_HS200	9
#define MMC_TIMING_MMC_HS400	10
#define MMC_TIMING_SD_EXP	11
#define MMC_TIMING_SD_EXP_1_2V	12

	unsigned char	signal_voltage;		/* signalling voltage (1.8V or 3.3V) */

#define MMC_SIGNAL_VOLTAGE_330	0
#define MMC_SIGNAL_VOLTAGE_180	1
#define MMC_SIGNAL_VOLTAGE_120	2

	unsigned char	drv_type;		/* driver type (A, B, C, D) */

#define MMC_SET_DRIVER_TYPE_B	0
#define MMC_SET_DRIVER_TYPE_A	1
#define MMC_SET_DRIVER_TYPE_C	2
#define MMC_SET_DRIVER_TYPE_D	3

	bool enhanced_strobe;			/* hs400es selection */
};

struct mmc_clk_phase {
	bool valid;
	u16 in_deg;
	u16 out_deg;
};

#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1)
struct mmc_clk_phase_map {
	struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
};

struct mmc_host;

enum mmc_err_stat {
	MMC_ERR_CMD_TIMEOUT,
	MMC_ERR_CMD_CRC,
	MMC_ERR_DAT_TIMEOUT,
	MMC_ERR_DAT_CRC,
	MMC_ERR_AUTO_CMD,
	MMC_ERR_ADMA,
	MMC_ERR_TUNING,
	MMC_ERR_CMDQ_RED,
	MMC_ERR_CMDQ_GCE,
	MMC_ERR_CMDQ_ICCE,
	MMC_ERR_REQ_TIMEOUT,
	MMC_ERR_CMDQ_REQ_TIMEOUT,
	MMC_ERR_ICE_CFG,
	MMC_ERR_CTRL_TIMEOUT,
	MMC_ERR_UNEXPECTED_IRQ,
	MMC_ERR_MAX,
};

struct mmc_host_ops {
	/*
	 * It is optional for the host to implement pre_req and post_req in
	 * order to support double buffering of requests (prepare one
	 * request while another request is active).
	 * pre_req() must always be followed by a post_req().
	 * To undo a call made to pre_req(), call post_req() with
	 * a nonzero err condition.
	 */
	void	(*post_req)(struct mmc_host *host, struct mmc_request *req,
			    int err);
	void	(*pre_req)(struct mmc_host *host, struct mmc_request *req);
	void	(*request)(struct mmc_host *host, struct mmc_request *req);
	/* Submit one request to host in atomic context. */
	int	(*request_atomic)(struct mmc_host *host,
				  struct mmc_request *req);
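
	/*
	 * Example (illustrative sketch only, not part of this interface):
	 * a hypothetical "foo" driver could use pre_req()/post_req() to
	 * move DMA mapping of the scatterlist out of the request fast
	 * path. The foo_* names are made up for illustration; mmc_dev(),
	 * mmc_get_dma_dir() and data->host_cookie are real.
	 *
	 *	static void foo_pre_req(struct mmc_host *host,
	 *				struct mmc_request *mrq)
	 *	{
	 *		struct mmc_data *data = mrq->data;
	 *
	 *		if (data)
	 *			data->host_cookie = dma_map_sg(mmc_dev(host),
	 *						data->sg, data->sg_len,
	 *						mmc_get_dma_dir(data));
	 *	}
	 *
	 *	static void foo_post_req(struct mmc_host *host,
	 *				 struct mmc_request *mrq, int err)
	 *	{
	 *		struct mmc_data *data = mrq->data;
	 *
	 *		if (data && data->host_cookie) {
	 *			dma_unmap_sg(mmc_dev(host), data->sg,
	 *				     data->sg_len,
	 *				     mmc_get_dma_dir(data));
	 *			data->host_cookie = 0;
	 *		}
	 *	}
	 */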

	/*
	 * Avoid calling the next three functions too often or in a "fast
	 * path", since the underlying controller might implement them in an
	 * expensive and/or slow way. Also note that these functions might
	 * sleep, so don't call them in atomic context!
	 */

	/*
	 * Notes to the set_ios callback:
	 * ios->clock might be 0. Some controllers treat 0 Hz like any other
	 * frequency, but others explicitly need the clock to be disabled.
	 * Otherwise e.g. voltage switching might fail because the SDCLK is
	 * not really quiet.
	 */
	void	(*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
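
	/*
	 * Example (hedged sketch, not a real driver): a minimal set_ios()
	 * that honours the "clock may be 0" rule by gating the card clock
	 * instead of programming a 0 Hz divider; foo_host and the foo_*
	 * helpers are hypothetical.
	 *
	 *	static void foo_set_ios(struct mmc_host *host, struct mmc_ios *ios)
	 *	{
	 *		struct foo_host *foo = mmc_priv(host);
	 *
	 *		if (ios->clock)
	 *			foo_set_clock(foo, ios->clock);
	 *		else
	 *			foo_gate_clock(foo);
	 *
	 *		foo_set_bus_width(foo, ios->bus_width);
	 *		foo_set_timing(foo, ios->timing);
	 *	}
	 */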

	/*
	 * Return values for the get_ro callback should be:
	 *   0 for a read/write card
	 *   1 for a read-only card
	 *   -ENOSYS when not supported (equal to NULL callback)
	 *   or a negative errno value when something bad happened
	 */
	int	(*get_ro)(struct mmc_host *host);
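
	/*
	 * Example: hosts that detect write-protect via a GPIO described in
	 * DT/ACPI can typically point get_ro at the slot-gpio helper (a
	 * common pattern, assuming the GPIO has been registered, e.g. by
	 * mmc_of_parse()):
	 *
	 *	#include <linux/mmc/slot-gpio.h>
	 *
	 *	static const struct mmc_host_ops foo_ops = {
	 *		...
	 *		.get_ro	= mmc_gpio_get_ro,
	 *	};
	 */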

	/*
	 * Return values for the get_cd callback should be:
	 *   0 for an absent card
	 *   1 for a present card
	 *   -ENOSYS when not supported (equal to NULL callback)
	 *   or a negative errno value when something bad happened
	 */
	int	(*get_cd)(struct mmc_host *host);
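
	/*
	 * Example: as with get_ro above, hosts with a card-detect GPIO can
	 * usually wire this callback to the slot-gpio helper (sketch,
	 * assuming the cd-gpio has been set up):
	 *
	 *	.get_cd	= mmc_gpio_get_cd,	(from <linux/mmc/slot-gpio.h>)
	 */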

	void	(*enable_sdio_irq)(struct mmc_host *host, int enable);
	/* Mandatory callback when using MMC_CAP2_SDIO_IRQ_NOTHREAD. */
	void	(*ack_sdio_irq)(struct mmc_host *host);
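
	/*
	 * Example (sketch of a common MMC_CAP2_SDIO_IRQ_NOTHREAD flow; the
	 * foo_* helpers are hypothetical): the host's interrupt handler
	 * masks the card interrupt and hands it to the core with
	 * sdio_signal_irq(); once the core has processed it, ack_sdio_irq()
	 * is called so the host can unmask again.
	 *
	 *	static irqreturn_t foo_irq(int irq, void *dev_id)
	 *	{
	 *		struct mmc_host *host = dev_id;
	 *
	 *		if (foo_card_irq_pending(host)) {
	 *			foo_mask_card_irq(host);
	 *			sdio_signal_irq(host);
	 *		}
	 *		return IRQ_HANDLED;
	 *	}
	 *
	 *	static void foo_ack_sdio_irq(struct mmc_host *host)
	 *	{
	 *		foo_unmask_card_irq(host);
	 *	}
	 */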

	/* optional callback for HC quirks */
	void	(*init_card)(struct mmc_host *host, struct mmc_card *card);

	int	(*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);

	/* Check if the card is pulling dat[0] low */
	int	(*card_busy)(struct mmc_host *host);

	/* The tuning command opcode value is different for SD and eMMC cards */
	int	(*execute_tuning)(struct mmc_host *host, u32 opcode);
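
	/*
	 * Example (simplified sketch of a typical execute_tuning() loop;
	 * the phase-setting helper and NUM_PHASES are hypothetical): the
	 * core passes the correct opcode (CMD19 for SD, CMD21 for eMMC)
	 * and the driver samples each delay/phase setting with
	 * mmc_send_tuning(), declared at the end of this header.
	 *
	 *	static int foo_execute_tuning(struct mmc_host *host, u32 opcode)
	 *	{
	 *		int phase, best = -1;
	 *
	 *		for (phase = 0; phase < NUM_PHASES; phase++) {
	 *			foo_set_sample_phase(host, phase);
	 *			if (!mmc_send_tuning(host, opcode, NULL))
	 *				best = phase;
	 *		}
	 *		if (best < 0)
	 *			return -EIO;
	 *		foo_set_sample_phase(host, best);
	 *		return 0;
	 *	}
	 */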

	/* Prepare HS400 target operating frequency depending on host driver */
	int	(*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);

	/* Execute HS400 tuning depending on host driver */
	int	(*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card);

	/* Optional callback to prepare for SD high-speed tuning */
	int	(*prepare_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);

	/* Optional callback to execute SD high-speed tuning */
	int	(*execute_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);

	/* Prepare switch to DDR during the HS400 init sequence */
	int	(*hs400_prepare_ddr)(struct mmc_host *host);

	/* Prepare for switching from HS400 to HS200 */
	void	(*hs400_downgrade)(struct mmc_host *host);

	/* Complete selection of HS400 */
	void	(*hs400_complete)(struct mmc_host *host);

	/* Prepare enhanced strobe depending on host driver */
	void	(*hs400_enhanced_strobe)(struct mmc_host *host,
					 struct mmc_ios *ios);
	int	(*select_drive_strength)(struct mmc_card *card,
					 unsigned int max_dtr, int host_drv,
					 int card_drv, int *drv_type);
	/* Reset the eMMC card via RST_n */
	void	(*card_hw_reset)(struct mmc_host *host);
	void	(*card_event)(struct mmc_host *host);

	/*
	 * Optional callback to support controllers with HW issues for
	 * multiple I/O. Returns the number of supported blocks for the
	 * request.
	 */
	int	(*multi_io_quirk)(struct mmc_card *card,
				  unsigned int direction, int blk_size);

	/* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */
	int	(*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

struct mmc_cqe_ops {
	/* Allocate resources, and make the CQE operational */
	int	(*cqe_enable)(struct mmc_host *host, struct mmc_card *card);
	/* Free resources, and make the CQE non-operational */
	void	(*cqe_disable)(struct mmc_host *host);
	/*
	 * Issue a read, write or DCMD request to the CQE. Also deal with the
	 * effect of ->cqe_off().
	 */
	int	(*cqe_request)(struct mmc_host *host, struct mmc_request *mrq);
	/* Free resources (e.g. DMA mapping) associated with the request */
	void	(*cqe_post_req)(struct mmc_host *host, struct mmc_request *mrq);
	/*
	 * Prepare the CQE and host controller to accept non-CQ commands. There
	 * is no corresponding ->cqe_on(), instead ->cqe_request() is required
	 * to deal with that.
	 */
	void	(*cqe_off)(struct mmc_host *host);
	/*
	 * Wait for all CQE tasks to complete. Return an error if recovery
	 * becomes necessary.
	 */
	int	(*cqe_wait_for_idle)(struct mmc_host *host);
	/*
	 * Notify CQE that a request has timed out. Return false if the
	 * request completed, or true if a timeout happened, in which case
	 * indicate whether recovery is needed.
	 */
	bool	(*cqe_timeout)(struct mmc_host *host, struct mmc_request *mrq,
			       bool *recovery_needed);
	/*
	 * Stop all CQE activity and prepare the CQE and host controller to
	 * accept recovery commands.
	 */
	void	(*cqe_recovery_start)(struct mmc_host *host);
	/*
	 * Clear the queue and call mmc_cqe_request_done() on all requests.
	 * Requests that errored will have the error set on the mmc_request
	 * (data->error or cmd->error for DCMD). Requests that did not error
	 * will have zero data bytes transferred.
	 */
	void	(*cqe_recovery_finish)(struct mmc_host *host);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct mmc_slot - MMC slot functions
 *
 * @cd_irq:		MMC/SD-card slot hotplug detection IRQ or -EINVAL
 * @handler_priv:	MMC/SD-card slot context
 *
 * Some MMC/SD host controllers implement slot-functions like card and
 * write-protect detection natively. However, a large number of controllers
 * leave these functions to the CPU. This struct provides a hook to attach
 * such slot-function drivers.
 */
struct mmc_slot {
	int cd_irq;
	bool cd_wake_enabled;
	void *handler_priv;
};

struct regulator;
struct mmc_pwrseq;

struct mmc_supply {
	struct regulator *vmmc;		/* Card power supply */
	struct regulator *vqmmc;	/* Optional Vccq supply */
};

struct mmc_ctx {
	struct task_struct *task;
};

struct mmc_host {
	struct device		*parent;
	struct device		class_dev;
	int			index;
	const struct mmc_host_ops *ops;
	struct mmc_pwrseq	*pwrseq;
	unsigned int		f_min;
	unsigned int		f_max;
	unsigned int		f_init;
	u32			ocr_avail;
	u32			ocr_avail_sdio;	/* SDIO-specific OCR */
	u32			ocr_avail_sd;	/* SD-specific OCR */
	u32			ocr_avail_mmc;	/* MMC-specific OCR */
	struct wakeup_source	*ws;		/* Enable consumption of uevents */
	u32			max_current_330;
	u32			max_current_300;
	u32			max_current_180;

#define MMC_VDD_165_195		0x00000080	/* VDD voltage 1.65 - 1.95 */
#define MMC_VDD_20_21		0x00000100	/* VDD voltage 2.0 ~ 2.1 */
#define MMC_VDD_21_22		0x00000200	/* VDD voltage 2.1 ~ 2.2 */
#define MMC_VDD_22_23		0x00000400	/* VDD voltage 2.2 ~ 2.3 */
#define MMC_VDD_23_24		0x00000800	/* VDD voltage 2.3 ~ 2.4 */
#define MMC_VDD_24_25		0x00001000	/* VDD voltage 2.4 ~ 2.5 */
#define MMC_VDD_25_26		0x00002000	/* VDD voltage 2.5 ~ 2.6 */
#define MMC_VDD_26_27		0x00004000	/* VDD voltage 2.6 ~ 2.7 */
#define MMC_VDD_27_28		0x00008000	/* VDD voltage 2.7 ~ 2.8 */
#define MMC_VDD_28_29		0x00010000	/* VDD voltage 2.8 ~ 2.9 */
#define MMC_VDD_29_30		0x00020000	/* VDD voltage 2.9 ~ 3.0 */
#define MMC_VDD_30_31		0x00040000	/* VDD voltage 3.0 ~ 3.1 */
#define MMC_VDD_31_32		0x00080000	/* VDD voltage 3.1 ~ 3.2 */
#define MMC_VDD_32_33		0x00100000	/* VDD voltage 3.2 ~ 3.3 */
#define MMC_VDD_33_34		0x00200000	/* VDD voltage 3.3 ~ 3.4 */
#define MMC_VDD_34_35		0x00400000	/* VDD voltage 3.4 ~ 3.5 */
#define MMC_VDD_35_36		0x00800000	/* VDD voltage 3.5 ~ 3.6 */
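
/*
 * Example: ios.vdd holds the bit number of the selected range above, so a
 * host that selected the MMC_VDD_32_33 range (bit 20) sees ios->vdd == 20,
 * and the regulator helpers further down convert such a bit number back to
 * a voltage. A driver advertising 3.2-3.4V support might set, for instance:
 *
 *	host->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 */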

	u32			caps;		/* Host capabilities */

#define MMC_CAP_4_BIT_DATA	(1 << 0)	/* Can the host do 4 bit transfers */
#define MMC_CAP_MMC_HIGHSPEED	(1 << 1)	/* Can do MMC high-speed timing */
#define MMC_CAP_SD_HIGHSPEED	(1 << 2)	/* Can do SD high-speed timing */
#define MMC_CAP_SDIO_IRQ	(1 << 3)	/* Can signal pending SDIO IRQs */
#define MMC_CAP_SPI		(1 << 4)	/* Talks only SPI protocols */
#define MMC_CAP_NEEDS_POLL	(1 << 5)	/* Needs polling for card-detection */
#define MMC_CAP_8_BIT_DATA	(1 << 6)	/* Can the host do 8 bit transfers */
#define MMC_CAP_AGGRESSIVE_PM	(1 << 7)	/* Suspend (e)MMC/SD at idle */
#define MMC_CAP_NONREMOVABLE	(1 << 8)	/* Nonremovable e.g. eMMC */
#define MMC_CAP_WAIT_WHILE_BUSY	(1 << 9)	/* Waits while card is busy */
#define MMC_CAP_3_3V_DDR	(1 << 11)	/* Host supports eMMC DDR 3.3V */
#define MMC_CAP_1_8V_DDR	(1 << 12)	/* Host supports eMMC DDR 1.8V */
#define MMC_CAP_1_2V_DDR	(1 << 13)	/* Host supports eMMC DDR 1.2V */
#define MMC_CAP_DDR		(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | \
				 MMC_CAP_1_2V_DDR)
#define MMC_CAP_POWER_OFF_CARD	(1 << 14)	/* Can power off after boot */
#define MMC_CAP_BUS_WIDTH_TEST	(1 << 15)	/* CMD14/CMD19 bus width ok */
#define MMC_CAP_UHS_SDR12	(1 << 16)	/* Host supports UHS SDR12 mode */
#define MMC_CAP_UHS_SDR25	(1 << 17)	/* Host supports UHS SDR25 mode */
#define MMC_CAP_UHS_SDR50	(1 << 18)	/* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104	(1 << 19)	/* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50	(1 << 20)	/* Host supports UHS DDR50 mode */
#define MMC_CAP_UHS		(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \
				 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
				 MMC_CAP_UHS_DDR50)
#define MMC_CAP_SYNC_RUNTIME_PM	(1 << 21)	/* Synced runtime PM suspends. */
#define MMC_CAP_NEED_RSP_BUSY	(1 << 22)	/* Commands with R1B can't use R1. */
#define MMC_CAP_DRIVER_TYPE_A	(1 << 23)	/* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C	(1 << 24)	/* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D	(1 << 25)	/* Host supports Driver Type D */
#define MMC_CAP_DONE_COMPLETE	(1 << 27)	/* RW reqs can be completed within mmc_request_done() */
#define MMC_CAP_CD_WAKE		(1 << 28)	/* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR	(1 << 29)	/* Commands during data transfer */
#define MMC_CAP_CMD23		(1 << 30)	/* CMD23 supported. */
#define MMC_CAP_HW_RESET	(1 << 31)	/* Reset the eMMC card via RST_n */

	u32			caps2;		/* More host capabilities */

#define MMC_CAP2_BOOTPART_NOACC	(1 << 0)	/* Boot partition no access */
#define MMC_CAP2_FULL_PWR_CYCLE	(1 << 2)	/* Can do full power cycle */
#define MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND (1 << 3) /* Can do full power cycle in suspend */
#define MMC_CAP2_HS200_1_8V_SDR	(1 << 5)	/* Can support HS200 1.8V SDR */
#define MMC_CAP2_HS200_1_2V_SDR	(1 << 6)	/* Can support HS200 1.2V SDR */
#define MMC_CAP2_HS200		(MMC_CAP2_HS200_1_8V_SDR | \
				 MMC_CAP2_HS200_1_2V_SDR)
#define MMC_CAP2_SD_EXP		(1 << 7)	/* SD express via PCIe */
#define MMC_CAP2_SD_EXP_1_2V	(1 << 8)	/* SD express 1.2V */
#define MMC_CAP2_CD_ACTIVE_HIGH	(1 << 10)	/* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH	(1 << 11)	/* Write-protect signal active high */
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14)	/* Don't power up before scan */
#define MMC_CAP2_HS400_1_8V	(1 << 15)	/* Can support HS400 1.8V */
#define MMC_CAP2_HS400_1_2V	(1 << 16)	/* Can support HS400 1.2V */
#define MMC_CAP2_HS400		(MMC_CAP2_HS400_1_8V | \
				 MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_HSX00_1_8V	(MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)
#define MMC_CAP2_HSX00_1_2V	(MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18)	/* No physical write protect pin, assume that card is always read-write */
#define MMC_CAP2_NO_SDIO	(1 << 19)	/* Do not send SDIO commands during initialization */
#define MMC_CAP2_HS400_ES	(1 << 20)	/* Host supports enhanced strobe */
#define MMC_CAP2_NO_SD		(1 << 21)	/* Do not send SD commands during initialization */
#define MMC_CAP2_NO_MMC		(1 << 22)	/* Do not send (e)MMC commands during initialization */
#define MMC_CAP2_CQE		(1 << 23)	/* Has eMMC command queue engine */
#define MMC_CAP2_CQE_DCMD	(1 << 24)	/* CQE can issue a direct command */
#define MMC_CAP2_AVOID_3_3V	(1 << 25)	/* Host must negotiate down from 3.3V */
#define MMC_CAP2_MERGE_CAPABLE	(1 << 26)	/* Host can merge a segment over the segment size */
#ifdef CONFIG_MMC_CRYPTO
#define MMC_CAP2_CRYPTO		(1 << 27)	/* Host supports inline encryption */
#else
#define MMC_CAP2_CRYPTO		0
#endif
#define MMC_CAP2_ALT_GPT_TEGRA	(1 << 28)	/* Host with eMMC that has GPT entry at a non-standard location */

	int			fixed_drv_type;	/* fixed driver type for non-removable media */

	mmc_pm_flag_t		pm_caps;	/* supported pm features */

	/* host specific block data */
	unsigned int		max_seg_size;	/* lim->max_segment_size */
	unsigned short		max_segs;	/* lim->max_segments */
	unsigned short		unused;
	unsigned int		max_req_size;	/* maximum number of bytes in one req */
	unsigned int		max_blk_size;	/* maximum size of one mmc block */
	unsigned int		max_blk_count;	/* maximum number of blocks in one req */
	unsigned int		max_busy_timeout; /* max busy timeout in ms */

	/* private data */
	spinlock_t		lock;		/* lock for claim and bus ops */

	struct mmc_ios		ios;		/* current io bus settings */

	/* group bitfields together to minimize padding */
	unsigned int		use_spi_crc:1;
	unsigned int		claimed:1;	/* host exclusively claimed */
	unsigned int		doing_init_tune:1; /* initial tuning in progress */
	unsigned int		can_retune:1;	/* re-tuning can be used */
	unsigned int		doing_retune:1;	/* re-tuning in progress */
	unsigned int		retune_now:1;	/* do re-tuning at next req */
	unsigned int		retune_paused:1; /* re-tuning is temporarily disabled */
	unsigned int		retune_crc_disable:1; /* don't trigger retune upon crc */
	unsigned int		can_dma_map_merge:1; /* merging can be used */
	unsigned int		vqmmc_enabled:1; /* vqmmc regulator is enabled */

	int			rescan_disable;	/* disable card detection */
	int			rescan_entered;	/* used with nonremovable devices */

	int			need_retune;	/* re-tuning is needed */
	int			hold_retune;	/* hold off re-tuning */
	unsigned int		retune_period;	/* re-tuning period in secs */
	struct timer_list	retune_timer;	/* for periodic re-tuning */

	bool			trigger_card_event; /* card_event necessary */

	struct mmc_card		*card;		/* device attached to this host */

	wait_queue_head_t	wq;
	struct mmc_ctx		*claimer;	/* context that has host claimed */
	int			claim_cnt;	/* "claim" nesting count */
	struct mmc_ctx		default_ctx;	/* default context */

	struct delayed_work	detect;
	int			detect_change;	/* card detect flag */
	struct mmc_slot		slot;

	const struct mmc_bus_ops *bus_ops;	/* current bus driver */

	unsigned int		sdio_irqs;
	struct task_struct	*sdio_irq_thread;
	struct work_struct	sdio_irq_work;
	bool			sdio_irq_pending;
	atomic_t		sdio_irq_thread_abort;

	mmc_pm_flag_t		pm_flags;	/* requested pm features */

	struct led_trigger	*led;		/* activity led */

#ifdef CONFIG_REGULATOR
	bool			regulator_enabled; /* regulator state */
#endif
	struct mmc_supply	supply;

	struct dentry		*debugfs_root;

	/* Ongoing data transfer that allows commands during transfer */
	struct mmc_request	*ongoing_mrq;

#ifdef CONFIG_FAIL_MMC_REQUEST
	struct fault_attr	fail_mmc_request;
#endif

	unsigned int		actual_clock;	/* Actual HC clock rate */

	unsigned int		slotno;		/* used for sdio acpi binding */

	int			dsr_req;	/* DSR value is valid */
	u32			dsr;		/* optional driver stage (DSR) value */

	/* Command Queue Engine (CQE) support */
	const struct mmc_cqe_ops *cqe_ops;
	void			*cqe_private;
	int			cqe_qdepth;
	bool			cqe_enabled;
	bool			cqe_on;
	bool			cqe_recovery_reset_always;

	/* Inline encryption support */
#ifdef CONFIG_MMC_CRYPTO
	struct blk_crypto_profile crypto_profile;
#endif

	/* Host Software Queue support */
	bool			hsq_enabled;
	int			hsq_depth;

	u32			err_stats[MMC_ERR_MAX];

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	unsigned long		private[] ____cacheline_aligned;
};

struct device_node;

struct mmc_host *mmc_alloc_host(int extra, struct device *);
struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
void mmc_of_parse_clk_phase(struct device *dev,
			    struct mmc_clk_phase_map *map);
int mmc_of_parse(struct mmc_host *host);
int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask);

static inline void *mmc_priv(struct mmc_host *host)
{
	return (void *)host->private;
}

static inline struct mmc_host *mmc_from_priv(void *priv)
{
	return container_of(priv, struct mmc_host, private);
}
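
/*
 * Example: the "extra" bytes passed to mmc_alloc_host() live in host->private
 * and are reached through mmc_priv()/mmc_from_priv(). A hypothetical driver
 * "foo" (sketch only) would typically do:
 *
 *	struct foo_host {
 *		void __iomem *base;
 *	};
 *
 *	mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
 *	if (!mmc)
 *		return -ENOMEM;
 *	foo = mmc_priv(mmc);
 *
 * and, going the other way, mmc_from_priv(foo) returns the mmc_host again.
 */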

#define mmc_host_is_spi(host)	((host)->caps & MMC_CAP_SPI)

#define mmc_dev(x)	((x)->parent)
#define mmc_classdev(x)	(&(x)->class_dev)
#define mmc_hostname(x)	(dev_name(&(x)->class_dev))

void mmc_detect_change(struct mmc_host *, unsigned long delay);
void mmc_request_done(struct mmc_host *, struct mmc_request *);
void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);

void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);

/*
 * May be called from host driver's system/runtime suspend/resume callbacks,
 * to know if SDIO IRQs have been claimed.
 */
static inline bool sdio_irq_claimed(struct mmc_host *host)
{
	return host->sdio_irqs > 0;
}

static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
	host->ops->enable_sdio_irq(host, 0);
	host->sdio_irq_pending = true;
	if (host->sdio_irq_thread)
		wake_up_process(host->sdio_irq_thread);
}

void sdio_signal_irq(struct mmc_host *host);

#ifdef CONFIG_REGULATOR
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			  struct regulator *supply,
			  unsigned short vdd_bit);
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
#else
static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
					struct regulator *supply,
					unsigned short vdd_bit)
{
	return 0;
}

static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
					  struct mmc_ios *ios)
{
	return -EINVAL;
}
#endif

int mmc_regulator_get_supply(struct mmc_host *mmc);
int mmc_regulator_enable_vqmmc(struct mmc_host *mmc);
void mmc_regulator_disable_vqmmc(struct mmc_host *mmc);
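
/*
 * Example (common pattern, sketched with a hypothetical foo_set_power()
 * helper): a driver calls mmc_regulator_get_supply() at probe time to fill
 * host->supply, and then drives vmmc from its set_ios() power_mode handling:
 *
 *	static void foo_set_power(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		switch (ios->power_mode) {
 *		case MMC_POWER_UP:
 *			if (!IS_ERR(mmc->supply.vmmc))
 *				mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
 *						      ios->vdd);
 *			break;
 *		case MMC_POWER_OFF:
 *			if (!IS_ERR(mmc->supply.vmmc))
 *				mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */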

static inline int mmc_card_is_removable(struct mmc_host *host)
{
	return !(host->caps & MMC_CAP_NONREMOVABLE);
}

static inline int mmc_card_keep_power(struct mmc_host *host)
{
	return host->pm_flags & MMC_PM_KEEP_POWER;
}

static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
{
	return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
}

/* TODO: Move to private header */
static inline int mmc_card_hs(struct mmc_card *card)
{
	return card->host->ios.timing == MMC_TIMING_SD_HS ||
		card->host->ios.timing == MMC_TIMING_MMC_HS;
}

/* TODO: Move to private header */
static inline int mmc_card_uhs(struct mmc_card *card)
{
	return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 &&
		card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
}

void mmc_retune_timer_stop(struct mmc_host *host);

static inline void mmc_retune_needed(struct mmc_host *host)
{
	if (host->can_retune)
		host->need_retune = 1;
}

static inline bool mmc_can_retune(struct mmc_host *host)
{
	return host->can_retune == 1;
}

static inline bool mmc_doing_retune(struct mmc_host *host)
{
	return host->doing_retune == 1;
}

static inline bool mmc_doing_tune(struct mmc_host *host)
{
	return host->doing_retune == 1 || host->doing_init_tune == 1;
}

static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
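
/*
 * Example: mmc_get_dma_dir() is typically passed straight to the DMA mapping
 * helpers when (un)mapping a request's scatterlist, as in the pre_req()
 * sketch earlier in this header, e.g.:
 *
 *	sg_count = dma_map_sg(mmc_dev(host), data->sg, data->sg_len,
 *			      mmc_get_dma_dir(data));
 */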

static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
					     enum mmc_err_stat stat)
{
	host->err_stats[stat] += 1;
}

int mmc_sd_switch(struct mmc_card *card, bool mode, int group,
		  u8 value, u8 *resp);
int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);

#endif /* LINUX_MMC_HOST_H */