00001
00043 #include <stdbool.h>
00044 #include <string.h>
00045
00046 #include <debug.h>
00047 #include <delayed_work.h>
00048 #include <bitops.h>
00049 #include <io.h>
00050 #include <interrupt.h>
00051 #include <irq_handler.h>
00052 #include <softirq.h>
00053 #include <status-codes.h>
00054 #include <util.h>
00055 #include <slist.h>
00056 #include <timeout.h>
00057 #include <workqueue.h>
00058 #include <buffer.h>
00059 #include <malloc.h>
00060
00061 #include <dmac/dma_controller.h>
00062 #include <chip/clk.h>
00063 #include <chip/portmux.h>
00064 #include <chip/irq-map.h>
00065 #include <sdmmc/sdmmc.h>
00066 #include <sdmmc/mcihost.h>
00067 #include "protocol.h"
00068 #include "mci_regs.h"
00069 #include <board/sdmmc.h>
00070 #include <app/softirq.h>
00071 #include <app/timer.h>
00072 #include <app/workqueue.h>
00073
00074 #if defined(CONFIG_SDMMC_MCIHOST_SLOT_A) && defined(CONFIG_SDMMC_MCIHOST_SLOT_B)
00075 #define SDMMC_SLOT_COUNT 2
00076 #elif defined(CONFIG_SDMMC_MCIHOST_SLOT_A) || defined(CONFIG_SDMMC_MCIHOST_SLOT_B)
00077 #define SDMMC_SLOT_COUNT 1
00078 #else
00079 #error "No slot config set!"
00080 #endif
00081
00082
00083 #define MCIHOST_REQ_TIMEOUT_MS 10000
00084
00085
00086 #define MCIHOST_WATCHDOG_PERIOD_MS 10000
00087
/*
 * Completion events posted by the hardirq/DMA handlers (as bits in
 * mcihost->pending_events) and consumed by sdmmc_mcihost_softirq().
 */
enum sdmmc_mcihost_event {
	EVENT_CMD_COMPLETE,	/* MCI_CMDRDY: command/response phase done */
	EVENT_DATA_COMPLETE,	/* MCI_FIFOEMPTY: controller FIFO drained */
	EVENT_REQ_COMPLETE,	/* MCI_XFRDONE: whole transfer finished */
	EVENT_DATA_ERROR,	/* MCI_DTOE: data timeout during transfer */
	EVENT_DMA_COMPLETE,	/* DMA controller request finished */
};
00095
/* Request state machine, driven by sdmmc_mcihost_softirq(). */
enum sdmmc_mcihost_state {
	STATE_IDLE = 0,		/* no request in flight */
	STATE_SENDING_CMD,	/* command issued, waiting for EVENT_CMD_COMPLETE */
	STATE_DATA_XFER,	/* data phase: DMA and FIFO drain in progress */
	STATE_SENDING_STOP,	/* STOP_TRANSMISSION issued, waiting for response */
	STATE_FINISH_REQ,	/* waiting for EVENT_REQ_COMPLETE (XFRDONE) */
	STATE_CARD_STATUS,	/* polling SEND_STATUS after a write */
};
00104
/* Per-slot controller state wrapped around the generic sdmmc_slot. */
struct sdmmc_mci_slot {
	uint32_t dtor_write;	/* cached DTOR value for write transfers */
	uint32_t dtor_read;	/* cached DTOR value for read transfers */
	uint32_t sdc_reg;	/* cached SDCR value: slot select + bus width */
	uint32_t cfg_reg;	/* cached CFG value (e.g. MCI_CFG_HSMODE) */
	struct sdmmc_slot slot;
	/* f_max in effect at the last clock setup; 0 means the clock was
	 * stopped (or never configured) — see sdmmc_mcihost_set_clk_rate(). */
	uint32_t last_f_max;
};
00113
/* Controller instance: one MCI peripheral serving up to two card slots. */
struct sdmmc_mcihost {
	struct sdmmc_host host;
	struct dmac_channel *dmac_chan;	/* DMA channel used for all data phases */
	struct dmac_request dmac_req;	/* the single in-flight DMA request */
	struct timeout req_timeout;	/* armed per request; checked by watchdog */
	struct slist req_queue;		/* requests waiting to be started */
	struct sdmmc_request *current;	/* request being processed, NULL if idle */
	unsigned long pending_events;	/* bitmask of enum sdmmc_mcihost_event */
	enum sdmmc_mcihost_state state;
	struct sdmmc_mci_slot slots[SDMMC_SLOT_COUNT];
	bool dtor_valid;		/* NOTE(review): not referenced in this file */
	uint32_t cmd_status;		/* SR snapshot at command completion */
	uint32_t data_status;		/* SR snapshot at data error/completion */
	struct workqueue_item watchdog_work;
	struct delayed_work watchdog_dw;
};
00130
00131
00132 #define MCI_CLEAR_ON_READ_BITS \
00133 (MCI_DCRCE | MCI_DTOE | MCI_CSTOE | MCI_BLKOVRE)
00134
/* Bit numbers for the 'flags' word passed to sdmmc_mcihost_send_cmd(),
 * used with test_bit()/set_bit(). */
enum mci_send_cmd_flags {
	MCI_SEND_CMD_STOP,	/* command stops an ongoing transfer */
	MCI_SEND_CMD_WRITE,	/* data direction is card-bound (write) */
};
00139
/* Physical MCI slot identifiers (the controller has two slot selects). */
enum sdmmc_mcihost_hwslot {
	SDMMC_MCIHOST_SLOT_A,
	SDMMC_MCIHOST_SLOT_B,
};
00144
/* Recover the mcihost instance from the embedded generic host. */
static inline struct sdmmc_mcihost *sdmmc_mcihost_of(struct sdmmc_host *host)
{
	return container_of(host, struct sdmmc_mcihost, host);
}
00149
/* Recover the per-slot controller state from the embedded generic slot. */
static inline struct sdmmc_mci_slot *sdmmc_mcislot_of(struct sdmmc_slot *slot)
{
	return container_of(slot, struct sdmmc_mci_slot, slot);
}
00154
/*
 * Software-reset the MCI controller, preserving the mode register
 * across the reset, then re-enable it.  Interrupts are disabled so the
 * save/reset/restore sequence cannot be interleaved with the IRQ
 * handler.
 */
static void mcihost_reset_controller(struct sdmmc_mcihost *mcihost)
{
	uint32_t mr;
	unsigned long iflags;

	iflags = cpu_irq_save();
	mr = mci_readl(MR);
	mci_writel(CR, MCI_CR_SWRST);
	mci_writel(MR, mr);
	mci_writel(CR, MCI_CR_MCIEN);

	/* Dummy read-back — presumably flushes the posted writes before
	 * interrupts are re-enabled; confirm against the bus docs. */
	mci_readl(MR);
	cpu_irq_restore(iflags);
}
00170
/*
 * Map a logical slot index to the physical slot (A/B), depending on
 * which slots are enabled in the build configuration.  When only one
 * slot is configured, index 0 maps to that slot.
 */
static inline enum sdmmc_mcihost_hwslot sdmmc_mcihost_get_hwslot(int slot_id)
{
#if defined(CONFIG_SDMMC_MCIHOST_SLOT_A) && defined(CONFIG_SDMMC_MCIHOST_SLOT_B)
	switch (slot_id) {
	case 0:
		return SDMMC_MCIHOST_SLOT_A;
	case 1:
		return SDMMC_MCIHOST_SLOT_B;
	default:
		unhandled_case(slot_id);
		return -1;	/* unreachable; keeps the compiler happy */
	}
#elif defined(CONFIG_SDMMC_MCIHOST_SLOT_A)
	return SDMMC_MCIHOST_SLOT_A;
#elif defined(CONFIG_SDMMC_MCIHOST_SLOT_B)
	return SDMMC_MCIHOST_SLOT_B;
#endif
}
00189
/*
 * Issue a command: translate the generic sdmmc command/response flags
 * into CMDR bits, program BLKR for data transfers, then write
 * ARGR/CMDR.  Does not unmask any interrupt — the caller enables
 * MCI_CMDRDY afterwards.
 *
 * @blocks/@block_size describe the data phase (0 blocks = no data).
 * @flags is a bitmask of enum mci_send_cmd_flags.
 */
static void sdmmc_mcihost_send_cmd(struct sdmmc_mcihost *mcihost,
		struct sdmmc_command *cmd, uint32_t blocks,
		uint32_t block_size, unsigned long flags)
{
	uint32_t cmdr;

	mcihost->cmd_status = 0;

	cmdr = MCI_CMDR_CMDNB(cmd->opcode);
	if (cmd->flags & SDMMC_RSP_PRESENT) {
		if (cmd->flags & SDMMC_RSP_136)
			cmdr |= MCI_CMDR_RSPTYP_136BIT;
		else if (cmd->flags & SDMMC_RSP_BUSY)
			cmdr |= MCI_CMDR_RSPTYP_R1B;
		else
			cmdr |= MCI_CMDR_RSPTYP_48BIT;
	}
	if (cmd->flags & SDMMC_CMD_OPD)
		cmdr |= MCI_CMDR_OPDCMD;	/* open-drain command line */

	cmdr |= MCI_CMDR_MAXLAT_64CYC;

	if (blocks) {
		if (test_bit(MCI_SEND_CMD_STOP, &flags)) {
			cmdr |= MCI_CMDR_STOP_XFER;
		} else {
			cmdr |= MCI_CMDR_START_XFER;
			/* Block geometry must be programmed before CMDR
			 * starts the transfer. */
			mci_writel(BLKR, MCI_BLKLEN(block_size)
					| MCI_BCNT(blocks));
		}
		if (blocks > 1)
			cmdr |= MCI_CMDR_MULTI_BLOCK;
		else
			cmdr |= MCI_CMDR_BLOCK;
		if (test_bit(MCI_SEND_CMD_WRITE, &flags))
			cmdr |= MCI_CMDR_TRDIR_WRITE;
		else
			cmdr |= MCI_CMDR_TRDIR_READ;
	}

	/* ARGR first; writing CMDR presumably triggers the command. */
	mci_writel(ARGR, cmd->arg);
	mci_writel(CMDR, cmdr);
}
00233
/* Send the card initialization sequence (special CMDR command). */
static void sdmmc_mcihost_send_initseq(void)
{
	mci_writel(CMDR, MCI_CMDR_SPCMD_INIT);
}
00238
/*
 * Issue STOP_TRANSMISSION (R1b) to end a multi-block transfer, and
 * unmask MCI_CMDRDY so completion is reported via EVENT_CMD_COMPLETE.
 * @write selects the transfer-direction bit matching the transfer that
 * is being stopped.
 */
static inline void sdmmc_mcihost_send_stop(struct sdmmc_mcihost *mcihost,
		bool write)
{
	uint32_t cmdr;

	mcihost->cmd_status = 0;

	cmdr = MCI_CMDR_CMDNB(SDMMC_STOP_TRANSMISSION) |
			MCI_CMDR_RSPTYP_R1B |
			MCI_CMDR_MAXLAT_64CYC |
			MCI_CMDR_STOP_XFER |
			MCI_CMDR_MULTI_BLOCK;

	if (write)
		cmdr |= MCI_CMDR_TRDIR_WRITE;
	else
		cmdr |= MCI_CMDR_TRDIR_READ;

	mci_writel(ARGR, 0);
	mci_writel(CMDR, cmdr);
	mci_writel(IER, MCI_CMDRDY);
}
00261
/*
 * Convert a card data timeout (nanoseconds plus extra clock cycles)
 * into a DTOR register value (DTOMUL multiplier + DTOCYC count).  The
 * smallest multiplier whose cycle count fits in the DTOCYC field is
 * chosen; if none fits, the maximum representable timeout is used.
 */
static uint32_t sdmmc_mcihost_calc_data_timeout(struct sdmmc_mcihost *mcihost,
		uint32_t timeout_ns, uint32_t timeout_clks)
{
	static const uint32_t dtomul_to_shift[] = {
		0, 4, 7, 8, 10, 12, 16, 20
	};
	uint32_t total_clocks;
	uint32_t cycles = 15;
	uint32_t mul = 0;

	/* Clocks needed for timeout_ns at the current bus rate, rounded
	 * up, plus the card-specified fixed clock count. */
	total_clocks = div_ceil(timeout_ns * (get_mci_pclk_rate(0) / 1000000),
			1000);
	total_clocks += timeout_clks;

	while (mul < 8) {
		unsigned int shift = dtomul_to_shift[mul];

		cycles = (total_clocks + (1 << shift) - 1) >> shift;
		if (cycles < 15)
			break;
		mul++;
	}

	/* No multiplier was large enough: saturate at the maximum. */
	if (mul >= 8) {
		mul = 7;
		cycles = 15;
	}

	return MCI_DTOMUL(mul) | MCI_DTOCYC(cycles);
}
00291
/*
 * Recompute and cache the read/write DTOR values for a slot from the
 * card's timeout parameters and the current bus frequency.  Must run
 * after any clock-rate change.
 */
static void sdmmc_mcihost_update_timeouts(struct sdmmc_mcihost *mcihost,
		struct sdmmc_mci_slot *mcislot)
{
	struct sdmmc_card *card = &mcislot->slot.card;

	sdmmc_card_update_timeouts(card, mcihost->host.f_cur);
	mcislot->dtor_read = sdmmc_mcihost_calc_data_timeout(mcihost,
			card->read_timeout_ns, card->read_timeout_clks);
	mcislot->dtor_write = sdmmc_mcihost_calc_data_timeout(mcihost,
			card->write_timeout_ns, card->write_timeout_clks);

	dbg_verbose("sdmmc_mcihost: data timeout: read %u / write %u\n",
			mcislot->dtor_read, mcislot->dtor_write);
}
00306
/*
 * Reprogram the MCI clock divider.  The single clock is shared by all
 * slots, so the target rate is the minimum of the host maximum and
 * every slot's f_max.  A target of 0 stops the clock by disabling the
 * controller.  No-op when this slot's f_max is unchanged since the
 * last call.
 */
static void sdmmc_mcihost_set_clk_rate(struct sdmmc_mcihost *mcihost,
		struct sdmmc_mci_slot *mcislot)
{
	struct sdmmc_slot *slot;
	uint32_t clkdiv;
	uint32_t master_hz;
	uint32_t target_hz;
	int i;

	if (mcislot->last_f_max == mcislot->slot.f_max)
		return;

	/* Clock is shared: honor the slowest slot. */
	target_hz = mcihost->host.f_max;
	for (i = 0; i < SDMMC_SLOT_COUNT; i++) {
		slot = &mcihost->slots[i].slot;
		/* NOTE(review): if f_max is unsigned, ">= 0" is always
		 * true and every slot (including f_max == 0) takes part
		 * in the min — confirm that is the intent. */
		if (slot->f_max >= 0)
			target_hz = min(target_hz, slot->f_max);
	}

	/* Some slot requested the clock off. */
	if (target_hz == 0) {
		mcislot->last_f_max = mcislot->slot.f_max;
		dbg_verbose("sdmmc_mcihost: stopping the clock\n");
		mci_writel(CR, MCI_CR_MCIDIS);
		return;
	}

	/* f_out = master / (2 * (CLKDIV + 1)), CLKDIV is 8 bits. */
	master_hz = get_mci_pclk_rate(0);
	clkdiv = div_ceil(master_hz, 2 * target_hz) - 1;
	if (clkdiv > 255)
		clkdiv = 255;

	mcihost->host.f_cur = master_hz / (2 * (clkdiv + 1));
	dbg_verbose("sdmmc_mcihost: new clock rate: %u Hz (requested %u Hz)\n",
			mcihost->host.f_cur, target_hz);
	mci_writel(MR, MCI_MR_CLKDIV(clkdiv) | MCI_MR_RDPROOF | MCI_MR_WRPROOF);

	/* Controller was disabled while the clock was stopped. */
	if (mcislot->last_f_max == 0)
		mci_writel(CR, MCI_CR_MCIEN);

	mcislot->last_f_max = mcislot->slot.f_max;
}
00351
00352 static void sdmmc_mcihost_update_bus_width(struct sdmmc_mci_slot *mcislot)
00353 {
00354 uint32_t sdcr;
00355
00356 sdcr = mcislot->sdc_reg & ~MCI_SDCBUS_MASK;
00357 switch (mcislot->slot.bus_width) {
00358 case 1:
00359 sdcr |= MCI_SDCBUS_1BIT;
00360 break;
00361 case 4:
00362 sdcr |= MCI_SDCBUS_4BIT;
00363 break;
00364 case 8:
00365 sdcr |= MCI_SDCBUS_8BIT;
00366 break;
00367 default:
00368 unhandled_case(mcislot->slot.bus_width);
00369 break;
00370 }
00371 mcislot->sdc_reg = sdcr;
00372
00373 dbg_verbose("sdmmc_mcihost: current bus width: %u (SDCR=%08x)\n",
00374 mcislot->slot.bus_width, sdcr);
00375 }
00376
/*
 * Apply a slot's bus parameters: high-speed mode flag, bus width and
 * clock rate.  Because a clock change affects the shared bus, the data
 * timeouts of every slot are recomputed afterwards.
 */
static void sdmmc_mcihost_set_bus_params(struct sdmmc_host *host,
		struct sdmmc_slot *slot)
{
	struct sdmmc_mcihost *mcihost = sdmmc_mcihost_of(host);
	struct sdmmc_mci_slot *mcislot = sdmmc_mcislot_of(slot);
	int i;

	if (test_bit(SDMMC_SLOT_HIGH_SPEED, &mcislot->slot.flags))
		mcislot->cfg_reg |= MCI_CFG_HSMODE;
	else
		mcislot->cfg_reg &= ~MCI_CFG_HSMODE;

	sdmmc_mcihost_update_bus_width(mcislot);
	sdmmc_mcihost_set_clk_rate(mcihost, mcislot);

	/* The clock rate (f_cur) may have changed, so refresh the
	 * cached DTOR values for all slots, not just this one. */
	for (i = 0; i < mcihost->host.slot_count; i++)
		sdmmc_mcihost_update_timeouts(mcihost, &mcihost->slots[i]);
}
00399
00400
00401
00402
00403
00404
00405
00406
00407
00408
00409
00410
00411
00412
00413
00414
00415
00416
00417
00418
00419
00420
00421
00422
00423
00424
00425
00426
00427
/*
 * Start the next queued request if the controller is idle: arm the
 * request timeout, program the slot's CFG/SDCR registers, set up the
 * data timeout and DMA enable for data transfers, then issue either
 * the initialization sequence or the request's command.  Finally
 * unmask MCI_CMDRDY so the IRQ handler reports completion.
 *
 * Caller must hold off interrupts (see sdmmc_mcihost_submit_req()).
 */
static void sdmmc_mcihost_send_next_req(struct sdmmc_mcihost *mcihost)
{
	struct sdmmc_mci_slot *mcislot;
	struct sdmmc_request *req;
	struct sdmmc_command *cmd;

	/* A request is already in flight. */
	if (unlikely(mcihost->current))
		return;

	if (unlikely(slist_is_empty(&mcihost->req_queue)))
		return;

	req = slist_pop_head(&mcihost->req_queue, struct sdmmc_request, node);
	mcihost->current = req;

	assert(!test_bit(SDMMC_REQ_WRITE, &req->flags) || req->blocks > 0);

	cmd = &req->cmd;

	/* Arm the watchdog timeout before the state changes. */
	timeout_init_ms(&mcihost->req_timeout, MCIHOST_REQ_TIMEOUT_MS);
	barrier();
	mcihost->state = STATE_SENDING_CMD;

	mcislot = sdmmc_mcislot_of(req->slot);

	/* Per-slot configuration (e.g. high-speed mode). */
	mci_writel(CFG, mcislot->cfg_reg);

	/* Slot select and bus width. */
	mci_writel(SDCR, mcislot->sdc_reg);

	/* Data transfers: enable DMA handshake, set data timeout. */
	if (req->blocks) {
		mci_writel(DMA, MCI_DMAEN);
		if (test_bit(SDMMC_REQ_WRITE, &req->flags))
			mci_writel(DTOR, mcislot->dtor_write);
		else
			mci_writel(DTOR, mcislot->dtor_read);
	}

	/* Either the 74-cycle init sequence or a normal command. */
	if (test_bit(SDMMC_REQ_INITSEQ, &req->flags)) {
		dbg_verbose("mcihost: Initialization sequence (74 idle "
				"cycles)...\n");
		sdmmc_mcihost_send_initseq();
	} else {
		unsigned long flags;

		dbg_verbose("mcihost: CMD%u(%08x) flags: %08lx\n", cmd->opcode,
				cmd->arg, cmd->flags);

		flags = 0;
		if (test_bit(SDMMC_REQ_WRITE, &req->flags))
			set_bit(MCI_SEND_CMD_WRITE, &flags);

		sdmmc_mcihost_send_cmd(mcihost, cmd, req->blocks,
				req->block_size, flags);
	}
	mci_writel(IER, MCI_CMDRDY);
}
00495
00496 static void sdmmc_mcihost_submit_req(struct sdmmc_host *host,
00497 struct sdmmc_request *req)
00498 {
00499 struct sdmmc_mcihost *mcihost = sdmmc_mcihost_of(host);
00500 unsigned long flags;
00501
00502 req->status = -STATUS_IN_PROGRESS;
00503 req->bytes_xfered = 0;
00504
00505 flags = cpu_irq_save();
00506 slist_insert_tail(&mcihost->req_queue, &req->node);
00507 sdmmc_mcihost_send_next_req(mcihost);
00508 cpu_irq_restore(flags);
00509 }
00510
/*
 * Drive the board-specific power-enable line for a slot, if the board
 * defines one; compiles to a no-op otherwise.
 */
static void sdmmc_mcihost_set_power(int slot_id, bool power)
{
#if defined(BOARD_MCISLOT_A_PWREN) || defined(BOARD_MCISLOT_B_PWREN)
	switch (sdmmc_mcihost_get_hwslot(slot_id)) {
#ifdef BOARD_MCISLOT_A_PWREN
	case SDMMC_MCIHOST_SLOT_A:
		board_mcislot_a_set_power(power);
		break;
#endif
#ifdef BOARD_MCISLOT_B_PWREN
	case SDMMC_MCIHOST_SLOT_B:
		board_mcislot_b_set_power(power);
		break;
#endif
	}
#endif
}
00528
/* Host-op wrapper: switch the slot's supply on. */
static void sdmmc_mcihost_power_up(struct sdmmc_host *host,
		struct sdmmc_slot *slot)
{
	sdmmc_mcihost_set_power(slot->id, true);
}
00534
/* Host-op wrapper: switch the slot's supply off. */
static void sdmmc_mcihost_power_down(struct sdmmc_host *host,
		struct sdmmc_slot *slot)
{
	sdmmc_mcihost_set_power(slot->id, false);
}
00540
/* Host-op stub: voltage switching is not implemented yet. */
static void sdmmc_mcihost_set_voltage(struct sdmmc_host *host,
		uint32_t ocr_bit)
{
	dbg_verbose("mcihost: TODO: set voltage to %u\n", ocr_bit);
}
00546
/*
 * Complete the current request: a status still marked in-progress is
 * turned into success, the next queued request is started, and only
 * then is the completion callback invoked (presumably so the
 * controller is kept busy while the callback runs).
 */
static void sdmmc_mcihost_req_close(struct sdmmc_mcihost *mcihost)
{
	struct sdmmc_request *req = mcihost->current;

	if (req->status == -STATUS_IN_PROGRESS)
		req->status = STATUS_OK;

	dbg_verbose("mcihost: request done, status=%d, bytes_xfered: %zu\n",
			req->status, req->bytes_xfered);
	mcihost->current = NULL;
	sdmmc_mcihost_send_next_req(mcihost);
	req->req_done(req);
}
00560
/*
 * Handle DMA completion for the current request: account the
 * transferred bytes, decide what to do next (propagate a DMA error,
 * wait for the FIFO to drain when everything is transferred, or submit
 * the next batch of buffers), and hand the consumed buffers back to
 * the requester.
 */
static void sdmmc_mcihost_load_data(struct sdmmc_mcihost *mcihost)
{
	struct sdmmc_request *sreq;
	struct dmac_request *dreq;
	struct slist buf_list;
	size_t bytes_total;

	dreq = &mcihost->dmac_req;

	/* Detach the buffers the DMA request just finished with. */
	slist_init(&buf_list);
	slist_move_to_tail(&buf_list, &dreq->buf_list);

	sreq = mcihost->current;
	sreq->bytes_xfered += dreq->bytes_xfered;

	bytes_total = sreq->blocks * sreq->block_size;
	assert(sreq->bytes_xfered <= bytes_total);

	dbg_verbose("mcihost load_data: ds %d ms %d bx %zu/%zu\n",
			dreq->status, sreq->status, sreq->bytes_xfered,
			bytes_total);

	/*
	 * Only act while the request is still in progress; otherwise we
	 * just return the buffers below.
	 */
	if (sreq->status == -STATUS_IN_PROGRESS) {
		if (dreq->status) {
			dbg_error("mcihost: dmac transfer failure: %d\n",
					dreq->status);
			sreq->status = dreq->status;
			/* Wait for the FIFO to drain before finishing. */
			mci_writel(IER, MCI_FIFOEMPTY);
		} else if (sreq->bytes_xfered >= bytes_total) {
			assert(slist_is_empty(&sreq->buf_list));
			/* All data moved: wait for the FIFO to drain. */
			mci_writel(IER, MCI_FIFOEMPTY);
		} else if (!slist_is_empty(&sreq->buf_list)) {
			/* More buffers queued: keep the DMA going. */
			slist_move_to_tail(&dreq->buf_list, &sreq->buf_list);
			dmac_chan_submit_request(mcihost->dmac_chan, dreq);
		}
	}

	/* Return the consumed buffers to the requester (or back onto
	 * the request when no callback is registered). */
	if (sreq->buf_list_done)
		sreq->buf_list_done(sreq, &buf_list);
	else
		slist_move_to_tail(&sreq->buf_list, &buf_list);
}
00616
/*
 * Feed additional buffers to an in-flight request.  If the request is
 * the current one, is in the data-transfer state and the DMA request
 * is idle, the buffers are submitted to the DMA engine immediately;
 * otherwise they are parked on the request for later.  Returns
 * -STATUS_FLUSHED when the request has already completed or failed.
 *
 * Must not be called from hardirq context; the softirq lock keeps the
 * state/DMA checks consistent with the softirq state machine.
 */
static int sdmmc_mcihost_submit_buf_list(struct sdmmc_host *host,
		struct sdmmc_request *req, struct slist *buf_list)
{
	struct sdmmc_mcihost *mcihost = sdmmc_mcihost_of(host);
	unsigned long sflags;
	int ret = 0;

	dbg_verbose("mcihost: submit_buf_list r%p c%p s%d state %d\n",
			req, mcihost->current, req->status, mcihost->state);

	assert(!cpu_is_in_hardirq_handler());
	assert(!slist_is_empty(buf_list));
	assert(req->buf_list_done);

	sflags = softirq_save();
	if (req->status != -STATUS_IN_PROGRESS) {
		ret = -STATUS_FLUSHED;
	} else if (mcihost->current == req
			&& slist_is_empty(&mcihost->dmac_req.buf_list)
			&& mcihost->state == STATE_DATA_XFER) {
		slist_move_to_tail(&mcihost->dmac_req.buf_list, buf_list);
		dmac_chan_submit_request(mcihost->dmac_chan,
				&mcihost->dmac_req);
	} else {
		slist_move_to_tail(&req->buf_list, buf_list);
	}
	softirq_restore(sflags);

	return ret;
}
00651
/*
 * DMA completion callback: post EVENT_DMA_COMPLETE and defer all
 * processing to the softirq.
 */
static void sdmmc_mcihost_dmac_done(struct dmac_channel *chan,
		struct dmac_request *req)
{
	struct sdmmc_mcihost *mcihost = req->context;

	dbg_verbose("mcihost: dmac_done\n");

	atomic_set_bit(EVENT_DMA_COMPLETE, &mcihost->pending_events);
	softirq_raise(SOFTIRQ_ID_MCIHOST);
}
00662
/*
 * Read back the command response and translate SR error bits into a
 * request/command status.  CRC errors are only considered when the
 * command expects a CRC-protected response.
 */
static void sdmmc_mcihost_chk_cmd(struct sdmmc_mcihost *mcihost,
		uint32_t status)
{
	struct sdmmc_request *req = mcihost->current;
	uint32_t error_flags;

	error_flags = MCI_RINDE | MCI_RDIRE | MCI_RENDE | MCI_RTOE | MCI_DTOE;
	if (req->cmd.flags & SDMMC_RSP_CRC)
		error_flags |= MCI_RCRCE;

	/* All four response words are read unconditionally; RSPR
	 * presumably advances through the response on successive reads
	 * — confirm against the register documentation. */
	req->cmd.resp[0] = mci_readl(RSPR);
	req->cmd.resp[1] = mci_readl(RSPR);
	req->cmd.resp[2] = mci_readl(RSPR);
	req->cmd.resp[3] = mci_readl(RSPR);

	dbg_verbose("mcihost: RESP: %08x status = %08x\n", req->cmd.resp[0],
			status);

	if (status & error_flags) {
		if (status & (MCI_RTOE | MCI_DTOE)) {
			dbg_error("mcihost: CMD%u timeout. Flags: %08x "
					"Resp[0]: %08x\n", req->cmd.opcode,
					status,
					req->cmd.resp[0]);
			req->status = -STATUS_TIMEOUT;
		} else {
			dbg_error("mcihost: CMD%u error. Flags: %08x "
					"Resp[0]: %08x\n",req->cmd.opcode,
					status,
					req->cmd.resp[0]);
			req->status = -STATUS_IO_ERROR;
		}
		req->cmd.status = req->status;
	} else
		req->cmd.status = 0;
}
00699
/*
 * Translate data-phase SR error bits (CRC error / data timeout) into a
 * request status, without overwriting an already-recorded error.
 */
static void sdmmc_mcihost_chk_data(struct sdmmc_mcihost *mcihost,
		uint32_t status)
{
	struct sdmmc_request *req = mcihost->current;
	uint32_t error_flags;

	error_flags = MCI_DCRCE | MCI_DTOE;

	if (status & error_flags) {
		if (status & MCI_DTOE) {
			dbg_error("mcihost: Data timeout. Flags: %08x\n",
					status);
			if (!req->status)
				req->status = -STATUS_TIMEOUT;
		} else {
			dbg_error("mcihost: Data crc error. Flags: %08x\n",
					status);
			if (!req->status)
				req->status = -STATUS_IO_ERROR;
		}
	}
}
/*
 * Translate end-of-transfer SR error bits (response and data errors)
 * into a request status, without overwriting an existing error.  Used
 * after the STOP command completes.
 */
static void sdmmc_mcihost_chk_done(struct sdmmc_mcihost *mcihost,
		uint32_t status)
{
	struct sdmmc_request *req = mcihost->current;
	uint32_t error_flags;

	error_flags = MCI_RINDE | MCI_RDIRE | MCI_RENDE | MCI_RTOE |
			MCI_DCRCE | MCI_DTOE;

	if (status & error_flags) {
		if (status & (MCI_RTOE | MCI_DTOE)) {
			dbg_error("mcihost: End timeout. Flags: %08x\n",
					status);
			if (!req->status)
				req->status = -STATUS_TIMEOUT;
		} else {
			dbg_error("mcihost: End crc error. Flags: %08x\n",
					status);
			if (!req->status)
				req->status = -STATUS_IO_ERROR;
		}
	}
}
00745
/*
 * Issue SEND_STATUS (CMD13) to the current request's card and unmask
 * MCI_CMDRDY; the response is evaluated in STATE_CARD_STATUS.
 */
static void sdmmc_mcihost_check_status(struct sdmmc_mcihost *mcihost)
{
	struct sdmmc_slot *slot;

	slot = mcihost->current->slot;
	mci_writel(ARGR, slot->card.rca);
	mci_writel(CMDR, MCI_CMDR_CMDNB(SDMMC_SEND_STATUS)
			| MCI_CMDR_RSPTYP_48BIT
			| MCI_CMDR_MAXLAT_64CYC);
	mci_writel(IER, MCI_CMDRDY);
}
00757
/*
 * Softirq state machine.  Consumes the event bits posted by the IRQ
 * and DMA handlers and advances the current request through
 * SENDING_CMD -> DATA_XFER -> [SENDING_STOP] -> FINISH_REQ ->
 * [CARD_STATUS] -> IDLE.  Several case labels intentionally fall
 * through when a state transition can be processed immediately; the
 * loop re-runs until the state stops changing.
 */
static void sdmmc_mcihost_softirq(void *data)
{
	struct sdmmc_mcihost *mcihost = data;
	struct sdmmc_request *req;
	enum sdmmc_mcihost_state prev_state;
	uint32_t status;

	/* Iterate until no state transition happened in a pass. */
	do {
		prev_state = mcihost->state;

		dbg_verbose("mcihost: softirq state %u events: %lx\n",
				mcihost->state, mcihost->pending_events);

		switch (mcihost->state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!atomic_test_and_clear_bit(EVENT_CMD_COMPLETE,
					&mcihost->pending_events))
				break;

			dbg_verbose("  CMD done: status 0x%x\n",
					mcihost->cmd_status);

			sdmmc_mcihost_chk_cmd(mcihost, mcihost->cmd_status);

			req = mcihost->current;
			if (req->status != -STATUS_IN_PROGRESS) {
				/* Command failed: reset and give up. */
				mcihost_reset_controller(mcihost);

				mcihost->state = STATE_IDLE;
				sdmmc_mcihost_req_close(mcihost);
				break;
			} else if (req->blocks == 0) {
				/* No data phase: wait for XFRDONE. */
				mci_writel(IER, MCI_XFRDONE);
				mcihost->state = STATE_FINISH_REQ;
				break;
			}

			/* Start the data phase. */
			mcihost->data_status = 0;
			if (test_bit(SDMMC_REQ_WRITE, &req->flags))
				mcihost->dmac_req.direction = DMA_TO_DEVICE;
			else
				mcihost->dmac_req.direction = DMA_FROM_DEVICE;

			if (!slist_is_empty(&req->buf_list)) {
				slist_move_to_tail(&mcihost->dmac_req.buf_list,
						&req->buf_list);
				dmac_chan_submit_request(mcihost->dmac_chan,
						&mcihost->dmac_req);
			}
			/* Enter DATA_XFER before the req_started callback
			 * runs, so submit_buf_list() sees the new state. */
			mcihost->state = prev_state = STATE_DATA_XFER;

			if (req->req_started)
				req->req_started(req);

			mci_writel(IER, MCI_DTOE);
			/* NOTE(review): redundant repeat of the assignment
			 * a few lines above. */
			prev_state = mcihost->state = STATE_DATA_XFER;

			/* fall through */

		case STATE_DATA_XFER:
			if (atomic_test_and_clear_bit(EVENT_DATA_ERROR,
					&mcihost->pending_events)) {
				sdmmc_mcihost_chk_data(mcihost,
						mcihost->data_status);
				dmac_chan_reset(mcihost->dmac_chan);
				mcihost_reset_controller(mcihost);

				mcihost->state = STATE_IDLE;
				sdmmc_mcihost_req_close(mcihost);
				break;
			}

			if (atomic_test_and_clear_bit(EVENT_DMA_COMPLETE,
					&mcihost->pending_events))
				sdmmc_mcihost_load_data(mcihost);

			if (!atomic_test_and_clear_bit(EVENT_DATA_COMPLETE,
					&mcihost->pending_events)) {
				break;
			}

			dbg_verbose("  DATA done: status 0x%x\n",
					mcihost->data_status);

			/* FIFO drained: send STOP if requested, else wait
			 * for XFRDONE. */
			req = mcihost->current;
			if (test_bit(SDMMC_REQ_STOP, &req->flags)) {
				sdmmc_mcihost_send_stop(mcihost,
						test_bit(SDMMC_REQ_WRITE,
							&req->flags));
				mcihost->state = STATE_SENDING_STOP;
			} else {
				mcihost->state = STATE_FINISH_REQ;
				mci_writel(IER, MCI_XFRDONE);
			}
			break;

		case STATE_SENDING_STOP:
			if (!atomic_test_and_clear_bit(EVENT_CMD_COMPLETE,
					&mcihost->pending_events))
				break;

			dbg_verbose("  STOP done: status 0x%x\n",
					mcihost->cmd_status);

			sdmmc_mcihost_chk_done(mcihost, mcihost->cmd_status);
			mci_writel(IER, MCI_XFRDONE);
			mcihost->state = prev_state = STATE_FINISH_REQ;

			/* fall through */

		case STATE_FINISH_REQ:
			if (!atomic_test_and_clear_bit(EVENT_REQ_COMPLETE,
					&mcihost->pending_events))
				break;

			dbg_verbose("  REQ done: status %d\n",
					mcihost->current->status);

			req = mcihost->current;
			if (!test_bit(SDMMC_REQ_WRITE, &req->flags)) {
				mcihost->state = STATE_IDLE;
				sdmmc_mcihost_req_close(mcihost);
				break;
			}

			/*
			 * Writes: poll the card with SEND_STATUS until it
			 * reports it is done programming before completing
			 * the request.
			 */
			sdmmc_mcihost_check_status(mcihost);
			mcihost->state = prev_state = STATE_CARD_STATUS;

			/* fall through */

		case STATE_CARD_STATUS:
			if (!atomic_test_and_clear_bit(EVENT_CMD_COMPLETE,
					&mcihost->pending_events))
				break;

			if (mcihost->cmd_status & (MCI_RTOE | MCI_RENDE
						| MCI_RCRCE | MCI_RDIRE
						| MCI_RINDE)) {
				/* Status poll failed: give up and
				 * complete the request as-is. */
				mcihost->state = STATE_IDLE;
				sdmmc_mcihost_req_close(mcihost);
				break;
			}

			status = mci_readl(RSPR);
			dbg_verbose("  Card status: %08x\n", status);

			/* Card state field (bits 12:9); 4 == "tran",
			 * i.e. the card has finished programming. */
			if (((status >> 9) & 0xf) != 4) {
				dbg_verbose("Bad card state: %08x\n", status);
				sdmmc_mcihost_check_status(mcihost);
			} else {
				mcihost->state = STATE_IDLE;
				sdmmc_mcihost_req_close(mcihost);
			}
			break;
		}
	} while (mcihost->state != prev_state);
}
00931
/*
 * Hardirq handler: snapshot SR, mask off the interrupt sources that
 * fired, record the status for the softirq and post the matching
 * events.  All real processing happens in sdmmc_mcihost_softirq().
 * Plain set_bit() is presumably safe here because this runs in hardirq
 * context while consumers use atomic_test_and_clear_bit() — confirm.
 */
static void sdmmc_mcihost_interrupt(void *data)
{
	struct sdmmc_mcihost *mcihost = data;
	uint32_t status;
	uint32_t status_masked;

	status = mci_readl(SR);
	status_masked = status & mci_readl(IMR);

	dbg_verbose("mcihost interrupt: status=%08x (%08x)\n",
			status, status_masked);

	if (status_masked & MCI_DTOE) {
		mci_writel(IDR, MCI_DTOE);
		mcihost->data_status = status;
		set_bit(EVENT_DATA_ERROR, &mcihost->pending_events);
	}
	if (status_masked & MCI_CMDRDY) {
		mci_writel(IDR, MCI_CMDRDY);
		mcihost->cmd_status = status;
		set_bit(EVENT_CMD_COMPLETE, &mcihost->pending_events);
	}
	if (status_masked & MCI_FIFOEMPTY) {
		mci_writel(IDR, MCI_FIFOEMPTY);
		set_bit(EVENT_DATA_COMPLETE, &mcihost->pending_events);
	}
	if (status_masked & MCI_XFRDONE) {
		/* End of request: mask everything. */
		mci_writel(IDR, ~0UL);
		if (!mcihost->data_status)
			mcihost->data_status = status;
		dbg_verbose("xfrdone: status=0x%x\n", status);
		set_bit(EVENT_REQ_COMPLETE, &mcihost->pending_events);
	}

	softirq_raise(SOFTIRQ_ID_MCIHOST);
}
DEFINE_IRQ_HANDLER(mci, sdmmc_mcihost_interrupt, 0);
00969
00970 static struct sdmmc_slot *sdmmc_mcihost_get_slot(struct sdmmc_host *host,
00971 int slot_id)
00972 {
00973 struct sdmmc_mcihost *mcihost = sdmmc_mcihost_of(host);
00974
00975 assert(slot_id >= 0);
00976 assert(slot_id < SDMMC_SLOT_COUNT);
00977
00978 return &mcihost->slots[slot_id].slot;
00979 }
00980
/*
 * Host-op: report whether the slot's write-protect switch is engaged.
 * Returns false when the board defines no WP pin for the slot.
 */
static bool sdmmc_mcihost_wp_is_active(struct sdmmc_host *host,
		struct sdmmc_slot *slot)
{
	assert(slot);

	switch (sdmmc_mcihost_get_hwslot(slot->id)) {
#if defined(CONFIG_SDMMC_MCIHOST_SLOT_A) && defined(BOARD_MCISLOT_A_WP)
	case SDMMC_MCIHOST_SLOT_A:
		return gpio_get_value(BOARD_MCISLOT_A_WP);
#endif
#if defined(CONFIG_SDMMC_MCIHOST_SLOT_B) && defined(BOARD_MCISLOT_B_WP)
	case SDMMC_MCIHOST_SLOT_B:
		return gpio_get_value(BOARD_MCISLOT_B_WP);
#endif
	default:
		/* No WP pin configured: assume writable. */
		return false;
	}
}
01000
01001 static void mcihost_dump_regs(void)
01002 {
01003 unsigned int offset;
01004
01005 dbg_verbose("mcihost register dump:");
01006 for (offset = 0; offset < 0x100; offset += 4) {
01007 if (offset % 16 == 0)
01008 dbg_verbose("\n%04x:", offset);
01009
01010 if (offset == MCI_RDR || offset == MCI_TDR)
01011 dbg_verbose(" xxxxxxxx");
01012 else
01013 dbg_verbose(" %08x",
01014 mmio_read32((void *)(MCI_BASE + offset)));
01015 }
01016 dbg_verbose("\n");
01017 }
01018
01019
01020
01021
01022
01023
01024
01025
01026 static void mcihost_watchdog_worker(void *data)
01027 {
01028 struct sdmmc_mcihost *mcihost = data;
01029
01030 if (mcihost->state != STATE_IDLE
01031 && timeout_has_expired(&mcihost->req_timeout)) {
01032 struct sdmmc_request *req;
01033
01034 req = mcihost->current;
01035 dbg_error("mcihost slot%u: timeout, state %u, status %d\n",
01036 req->slot->id, mcihost->state, req->status);
01037 dbg_verbose(" CMD%u(0x%x) flags 0x%lx status %d resp 0x%x\n",
01038 req->cmd.opcode, req->cmd.arg,
01039 req->cmd.flags, req->cmd.status,
01040 req->cmd.resp[0]);
01041 if (req->blocks)
01042 dbg_verbose(
01043 " %s: %u blocks x %u, buf %p, xfered %zu\n",
01044 (req->flags & SDMMC_REQ_WRITE)
01045 ? "WRITE" : "READ",
01046 req->blocks, req->block_size,
01047 slist_peek_head(&req->buf_list,
01048 struct buffer, node)->addr.ptr,
01049 req->bytes_xfered);
01050 if (req->flags & SDMMC_REQ_STOP)
01051 dbg_verbose(" STOP\n");
01052
01053 mcihost_dump_regs();
01054
01055 dmac_chan_reset(mcihost->dmac_chan);
01056 mcihost_reset_controller(mcihost);
01057
01058 mcihost->state = STATE_IDLE;
01059 sdmmc_mcihost_req_close(mcihost);
01060 }
01061
01062 delayed_work_run_us(&mcihost->watchdog_dw,
01063 &mcihost->watchdog_work,
01064 1000 * MCIHOST_WATCHDOG_PERIOD_MS);
01065 }
01066
01067 static void sdmmc_mcihost_enable(struct sdmmc_host *host)
01068 {
01069 struct sdmmc_mcihost *mcihost = sdmmc_mcihost_of(host);
01070 int i;
01071
01072 board_mcislots_enable(0);
01073
01074 i = 0;
01075 #if defined(CONFIG_SDMMC_MCIHOST_SLOT_A) && defined(BOARD_MCISLOT_A_CD)
01076 sdmmc_cd_enable(mcihost->slots[i].slot.cd);
01077 i++;
01078 #endif
01079 #if defined(CONFIG_SDMMC_MCIHOST_SLOT_B) && defined(BOARD_MCISLOT_B_CD)
01080 sdmmc_cd_enable(mcihost->slots[i].slot.cd);
01081 #endif
01082 mci_writel(CR, MCI_CR_MCIEN);
01083
01084
01085 delayed_work_run_us(&mcihost->watchdog_dw,
01086 &mcihost->watchdog_work,
01087 1000 * MCIHOST_WATCHDOG_PERIOD_MS);
01088 }
01089
01090 struct sdmmc_host *sdmmc_mcihost_init()
01091 {
01092 struct sdmmc_mcihost *mcihost;
01093 int i;
01094
01095 mcihost = malloc(sizeof(struct sdmmc_mcihost));
01096 if (mcihost == NULL) {
01097 dbg_printf("sdmmc_mcihost_init: Out of memory!\n");
01098 return NULL;
01099 }
01100 memset(mcihost, 0, sizeof(struct sdmmc_mcihost));
01101 mcihost->host.f_max = min(CONFIG_MAX_MMC_HZ,
01102 get_mci_pclk_rate(0) / 2);
01103 mcihost->host.f_min = (get_mci_pclk_rate(0) + 511) / 512;
01104 mcihost->host.ocr_avail = SDMMC_OCR_V_32_33 | SDMMC_OCR_V_33_34;
01105 slist_init(&mcihost->req_queue);
01106 mcihost->dmac_chan = dmac_mci_alloc_channel();
01107 if (mcihost->dmac_chan == NULL) {
01108 dbg_panic("sdmmc_mcihost_init: Failed to allocate DMA channel!\n");
01109 free(mcihost);
01110 return NULL;
01111 }
01112 dmac_req_init(&mcihost->dmac_req);
01113 mcihost->dmac_req.reg_width = DMAC_REG_WIDTH_32BIT;
01114 mcihost->dmac_req.req_done = sdmmc_mcihost_dmac_done;
01115 mcihost->dmac_req.context = mcihost;
01116
01117 mcihost->host.enable = sdmmc_mcihost_enable;
01118 mcihost->host.slot_count = SDMMC_SLOT_COUNT;
01119 mcihost->host.get_slot = sdmmc_mcihost_get_slot;
01120 mcihost->host.power_up = sdmmc_mcihost_power_up;
01121 mcihost->host.power_down = sdmmc_mcihost_power_down;
01122 mcihost->host.set_bus_params = sdmmc_mcihost_set_bus_params;
01123 mcihost->host.set_voltage = sdmmc_mcihost_set_voltage;
01124 mcihost->host.wp_is_active = sdmmc_mcihost_wp_is_active;
01125 mcihost->host.submit_req = sdmmc_mcihost_submit_req;
01126 mcihost->host.submit_buf_list = sdmmc_mcihost_submit_buf_list;
01127
01128 i = 0;
01129 #ifdef CONFIG_SDMMC_MCIHOST_SLOT_A
01130 sdmmc_slot_init(&mcihost->slots[i].slot, &mcihost->host, i);
01131 mcihost->slots[i].sdc_reg = MCI_SDCSEL_SLOT_A;
01132 #ifdef BOARD_MCISLOT_A_CD
01133 mcihost->slots[i].slot.cd = sdmmc_cd_init(&mcihost->slots[i].slot,
01134 BOARD_MCISLOT_A_CD);
01135 #endif
01136 i++;
01137 #endif
01138
01139 #ifdef CONFIG_SDMMC_MCIHOST_SLOT_B
01140 sdmmc_slot_init(&mcihost->slots[i].slot, &mcihost->host, i);
01141 mcihost->slots[i].sdc_reg = MCI_SDCSEL_SLOT_B;
01142 #ifdef BOARD_MCISLOT_B_CD
01143 mcihost->slots[i].slot.cd = sdmmc_cd_init(&mcihost->slots[i].slot,
01144 BOARD_MCISLOT_B_CD);
01145 #endif
01146 #endif
01147
01148 mci_writel(CR, MCI_CR_SWRST);
01149 mci_writel(SDCR, 0);
01150
01151 softirq_set_handler(SOFTIRQ_ID_MCIHOST, sdmmc_mcihost_softirq, mcihost);
01152 setup_irq_handler(MCI_IRQ, mci, 0, mcihost);
01153
01154 workqueue_init_item(&mcihost->watchdog_work, mcihost_watchdog_worker,
01155 mcihost);
01156 delayed_work_init(&mcihost->watchdog_dw, &mcihost_watchdog_timer,
01157 &mcihost_watchdog_workqueue);
01158
01159 dbg_printf("sdmmc_host_init: max/min %u/%u Hz\n",
01160 mcihost->host.f_max, mcihost->host.f_min);
01161
01162 return &mcihost->host;
01163 }