#include <assert.h>
#include <bitops.h>
#include <buffer.h>
#include <dmapool.h>
#include <interrupt.h>
#include <io.h>
#include <irq_handler.h>
#include <status-codes.h>
#include <chip/memory-map.h>
#include <chip/irq-map.h>
#include <dmac/dma_controller.h>
#include <dmac/dmaca.h>

#include <app/config_dmapool.h>

#include "dmac_util.h"
#include "dmaca_regs.h"

#ifdef CONFIG_DMACA_NR_CHANNELS
# if CONFIG_DMACA_NR_CHANNELS > CHIP_DMACA_NR_CHANNELS
#  error Too many DMACA channels requested
# endif
# define NR_CHANNELS CONFIG_DMACA_NR_CHANNELS
#else
# define NR_CHANNELS CHIP_DMACA_NR_CHANNELS
#endif

#ifndef dmaca_desc_physmem_pool
# define dmaca_desc_physmem_pool hsb_sram_pool
#endif

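/*
 * Per-channel state flags kept in dmaca_channel::flags.
 *
 * ALLOCATED: the channel has been handed out by dmaca_alloc_channel().
 * ENABLED: the channel accepts new requests (cleared while a reset is
 *	in progress).
 * ACTIVE: a descriptor chain is currently programmed into the hardware.
 */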
enum dmaca_chan_flag {
        DMACA_CHAN_ALLOCATED,
        DMACA_CHAN_ENABLED,
        DMACA_CHAN_ACTIVE,
};

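/*
 * Software state for one DMACA channel: a pool of hardware descriptors,
 * the queue of submitted requests, the flat queue of their buffers, the
 * channel's register window and status bit mask, and the peripheral
 * register addresses used for RX and TX transfers. The embedded
 * dmac_channel is what gets handed back to client drivers.
 */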
struct dmaca_channel {
        struct dma_pool desc_pool;
        struct slist req_queue;
        struct slist buf_queue;
        void *regs;
        uint8_t mask;
        unsigned long flags;
        phys_addr_t rx_reg_addr;
        phys_addr_t tx_reg_addr;
        struct dmac_channel dch;
};

static struct dmaca_channel dmaca_channel[NR_CHANNELS];
static int dmaca_use_count;

static struct dmaca_channel *dmaca_channel_of(struct dmac_channel *ch)
{
        return container_of(ch, struct dmaca_channel, dch);
}

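/*
 * The controller as a whole is enabled on demand: the first channel with
 * work to do turns it on through DMACFG, and the last one to go idle turns
 * it off again. dmaca_use_count tracks the number of active users.
 */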
static void dmaca_controller_get(void)
{
        if (dmaca_use_count++ == 0)
                dmaca_write_reg(DMACFG, DMACA_DMACFG_DMA_EN);
}

static void dmaca_controller_put(void)
{
        if (--dmaca_use_count == 0)
                dmaca_write_reg(DMACFG, 0);
}

static unsigned int dmaca_chan_id(struct dmaca_channel *chan)
{
        return ilog2(chan->mask);
}

static inline struct dmac_request *dmaca_chan_peek_head_req(
                struct dmaca_channel *chan)
{
        return slist_peek_head(&chan->req_queue, struct dmac_request, node);
}

static inline struct dmac_request *dmaca_chan_pop_head_req(
                struct dmaca_channel *chan)
{
        return slist_pop_head(&chan->req_queue, struct dmac_request, node);
}

static inline struct buffer *dmaca_chan_peek_head_buf(
                struct dmaca_channel *chan)
{
        return slist_peek_head(&chan->buf_queue, struct buffer, node);
}

static struct dmaca_hw_desc *dmaca_chan_alloc_desc(struct dmaca_channel *chan,
                phys_addr_t *phys)
{
        return dma_pool_alloc(&chan->desc_pool, phys);
}

static void dmaca_chan_free_desc(struct dmaca_channel *chan,
                struct dmaca_hw_desc *desc)
{
        dma_pool_free(&chan->desc_pool, desc);
}

static void dmaca_chan_buf_done(struct dmaca_channel *chan, struct buffer *buf)
{
        struct dmaca_hw_desc *desc = buf->dma_desc;

        dmaca_chan_free_desc(chan, desc);
        buf->dma_desc = NULL;
}

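/*
 * Complete a request: give its buffers back from the channel's buffer
 * queue to the request's own list, record the final status, and call the
 * client's completion callback, if any.
 */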
static void dmaca_chan_req_done(struct dmaca_channel *chan,
                struct dmac_request *req, int status)
{
        req->status = status;
        slist_give_back_head(&req->buf_list, &chan->buf_queue);
        if (req->req_done)
                req->req_done(&chan->dch, req);
}

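/*
 * Abort whatever the channel is doing: mask its interrupts, force CH_EN
 * off and wait for the hardware to acknowledge, free any descriptors
 * still attached to queued buffers, fail all pending requests with
 * -STATUS_IO_ERROR, and finally mark the channel usable again.
 */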
static void dmaca_chan_reset(struct dmac_channel *dch)
{
        struct dmaca_channel *chan = dmaca_channel_of(dch);
        struct dmac_request *req;
        struct buffer *buf;
        unsigned long iflags;

        dbg_verbose("dmaca ch%u reset: e%x s%08x d%08x l%08x c%08x:%08x\n",
                        dmaca_chan_id(chan), dmaca_read_reg(CH_EN),
                        dmaca_chan_read_reg(chan, SAR),
                        dmaca_chan_read_reg(chan, DAR),
                        dmaca_chan_read_reg(chan, LLP),
                        dmaca_chan_read_reg(chan, CTLH),
                        dmaca_chan_read_reg(chan, CTLL));
        dbg_verbose(" status %x\n", dmaca_read_reg(STATUS_INT));

        /* Mark the channel as going down and mask its interrupts */
        iflags = cpu_irq_save();
        clear_bit(DMACA_CHAN_ENABLED, &chan->flags);
        clear_bit(DMACA_CHAN_ACTIVE, &chan->flags);
        dmaca_clear_chan_bit(chan, MASK_TFR);
        dmaca_clear_chan_bit(chan, MASK_BLOCK);
        dmaca_clear_chan_bit(chan, MASK_ERR);
        cpu_irq_restore(iflags);

        /* Disable the channel and wait for the hardware to acknowledge */
        dmaca_clear_chan_bit(chan, CH_EN);
        while (dmaca_test_chan_bit(chan, CH_EN))
                barrier();

        /* Free the descriptors attached to queued buffers */
        slist_for_each(&chan->buf_queue, buf, node) {
                if (!buf->dma_desc)
                        break;

                dmaca_chan_buf_done(chan, buf);
        }

        /* Fail all pending requests */
        while (!slist_is_empty(&chan->req_queue)) {
                req = dmaca_chan_pop_head_req(chan);
                dmaca_chan_req_done(chan, req, -STATUS_IO_ERROR);
        }

        /* The channel may now accept new requests again */
        set_bit(DMACA_CHAN_ENABLED, &chan->flags);
}

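/*
 * Build a linked-list (LLP) descriptor chain covering as many queued
 * buffers as descriptors can be allocated for, then start the channel.
 * Must be called with interrupts disabled, the channel enabled and not
 * already active; interrupts are re-enabled temporarily around the
 * descriptor allocations.
 */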
static void dmaca_chan_process_queue(struct dmaca_channel *chan)
{
        struct dmaca_hw_desc *desc;
        struct dmaca_hw_desc *desc_next;
        struct buffer *buf;
        struct buffer *buf_next;
        phys_addr_t phys;
        phys_addr_t phys_next;
        struct dmac_request *req;

        assert(!dmaca_test_chan_bit(chan, CH_EN));
        assert(test_bit(DMACA_CHAN_ENABLED, &chan->flags));
        assert(!test_bit(DMACA_CHAN_ACTIVE, &chan->flags));

        dbg_printf("dmaca ch%u: processing queue...\n", dmaca_chan_id(chan));

        if (slist_is_empty(&chan->buf_queue)) {
                dbg_printf(" - no buffers\n");
                assert(slist_is_empty(&chan->req_queue));
                goto queue_is_empty;
        }

        set_bit(DMACA_CHAN_ACTIVE, &chan->flags);
        cpu_irq_enable();

        desc = dmaca_chan_alloc_desc(chan, &phys);
        if (!desc) {
                dbg_error("dmaca ch%u: failed to allocate descriptor\n",
                                dmaca_chan_id(chan));
                cpu_irq_disable();
                goto no_desc;
        }

        dmaca_chan_write_reg(chan, LLP, phys);

        /*
         * Walk the buffer queue and describe each buffer with one hardware
         * descriptor. The descriptor for the next buffer is allocated ahead
         * of time so that the current one can be linked to it; when no
         * further descriptor is available (end of queue or pool exhausted)
         * the current one terminates the chain and raises an interrupt.
         * Interrupts are only disabled around the list manipulation itself.
         */
        cpu_irq_disable();
        req = dmaca_chan_peek_head_req(chan);
        slist_for_each_safe(&chan->buf_queue, buf, buf_next, node) {
                unsigned int xfer_width;
                unsigned int burst_length;
                uint32_t ctll;

                if (!desc)
                        break;

                desc_next = NULL;
                if (slist_node_is_valid(&chan->buf_queue, &buf_next->node))
                        desc_next = dmaca_chan_alloc_desc(chan, &phys_next);

                cpu_irq_enable();

                xfer_width = req->reg_width;
                burst_length = req->burst_length;
                assert(xfer_width <= DMAC_REG_WIDTH_32BIT);

                ctll = DMACA_CTLL_DST_TR_WIDTH(xfer_width)
                        | DMACA_CTLL_SRC_TR_WIDTH(xfer_width)
                        | DMACA_CTLL_DST_MSIZE(burst_length)
                        | DMACA_CTLL_SRC_MSIZE(burst_length);
                if (desc_next)
                        ctll |= DMACA_CTLL_LLP_D_EN | DMACA_CTLL_LLP_S_EN;

                if (req->direction == DMA_FROM_DEVICE) {
                        ctll |= DMACA_CTLL_DINC_INCREMENT
                                | DMACA_CTLL_SINC_NO_CHANGE
                                | DMACA_CTLL_TT_P2M;
                        desc->sar = chan->rx_reg_addr;
                        desc->dar = buf->addr.phys;
                } else {
                        ctll |= DMACA_CTLL_DINC_NO_CHANGE
                                | DMACA_CTLL_SINC_INCREMENT
                                | DMACA_CTLL_TT_M2P;
                        desc->sar = buf->addr.phys;
                        desc->dar = chan->tx_reg_addr;
                }

                if (!desc_next)
                        ctll |= DMACA_CTLL_INT_EN;

                if (slist_node_is_last(&req->buf_list, &buf->node)) {
                        if (req->req_done)
                                ctll |= DMACA_CTLL_INT_EN;
                        req = slist_peek_next(&req->node,
                                        struct dmac_request, node);
                }

                /* Terminate the chain cleanly if there is no next descriptor */
                desc->llp = desc_next ? phys_next : 0;
                desc->ctll = ctll;
                desc->ctlh = DMACA_CTLH_BLOCK_TS(buf->len >> xfer_width);

                dbg_printf(" D%08lx: s%08x d%08x l%08x c%08x:%08x\n",
                                phys, desc->sar, desc->dar, desc->llp,
                                desc->ctlh, desc->ctll);

                buf->dma_desc = desc;
                desc = desc_next;
                phys = phys_next;
                cpu_irq_disable();
        }

        /*
         * Make sure the descriptors are visible to the controller before
         * the channel is started.
         */
        barrier();
        dmaca_chan_write_reg(chan, CTLL,
                        DMACA_CTLL_LLP_D_EN | DMACA_CTLL_LLP_S_EN);

        /* Unmask the channel's interrupts and start it */
        dmaca_set_chan_bit(chan, MASK_TFR);
        dmaca_set_chan_bit(chan, MASK_BLOCK);
        dmaca_set_chan_bit(chan, MASK_ERR);
        dmaca_set_chan_bit(chan, CH_EN);

        dbg_printf(" CH_EN: %x\n", dmaca_read_reg(CH_EN));

        return;

no_desc:
        clear_bit(DMACA_CHAN_ACTIVE, &chan->flags);

queue_is_empty:
        dmaca_clear_chan_bit(chan, MASK_TFR);
        dmaca_clear_chan_bit(chan, MASK_BLOCK);
        dmaca_clear_chan_bit(chan, MASK_ERR);
        dmaca_controller_put();
}

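/*
 * Queue a request on the channel and kick the hardware if it is idle.
 * Called with interrupts enabled; if the channel has been disabled by a
 * reset in progress, the request is failed immediately with
 * -STATUS_IO_ERROR.
 */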
static void dmaca_chan_submit_req(struct dmac_channel *dch,
                struct dmac_request *req)
{
        struct dmaca_channel *chan = dmaca_channel_of(dch);
        bool queued = true;

        dbg_printf("dmaca ch%u: submit req %p\n", dmaca_chan_id(chan), req);

        assert(cpu_irq_is_enabled());
        assert(test_bit(DMACA_CHAN_ALLOCATED, &chan->flags));
        dmac_verify_req(req);

        req->bytes_xfered = 0;
        req->status = -STATUS_IN_PROGRESS;

        cpu_irq_disable();
        if (likely(test_bit(DMACA_CHAN_ENABLED, &chan->flags))) {
                slist_insert_tail(&chan->req_queue, &req->node);
                slist_borrow_to_tail(&chan->buf_queue, &req->buf_list);
                if (!test_bit(DMACA_CHAN_ACTIVE, &chan->flags)) {
                        dmaca_controller_get();
                        dmaca_chan_process_queue(chan);
                }
        } else {
                queued = false;
        }
        cpu_irq_enable();

        if (!queued)
                dmaca_chan_req_done(chan, req, -STATUS_IO_ERROR);
}

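/*
 * Transfer-complete handling: the whole descriptor chain has finished, so
 * every buffer that still has a descriptor attached belongs to a completed
 * request. Retire them, complete their requests, and restart the queue in
 * case more work was submitted in the meantime.
 */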
static void dmaca_chan_all_done(struct dmaca_channel *chan)
{
        struct dmac_request *req;

        assert(!dmaca_test_chan_bit(chan, CH_EN));

        dmaca_write_reg(CLEAR_TFR, chan->mask);
        dmaca_write_reg(CLEAR_BLOCK, chan->mask);

        dmaca_clear_chan_bit(chan, MASK_TFR);
        dmaca_clear_chan_bit(chan, MASK_BLOCK);

        dbg_printf("dmaca ch%u: all done\n", dmaca_chan_id(chan));

        /* Retire every buffer that had a descriptor in the completed chain */
        while (!slist_is_empty(&chan->req_queue)) {
                struct buffer *buf;
                size_t bytes_xfered = 0;

                req = dmaca_chan_peek_head_req(chan);
                do {
                        buf = dmaca_chan_peek_head_buf(chan);
                        if (!buf->dma_desc) {
                                req->bytes_xfered += bytes_xfered;
                                goto done;
                        }

                        slist_pop_head_node(&chan->buf_queue);
                        bytes_xfered += buf->len;
                        dmaca_chan_buf_done(chan, buf);
                } while (&buf->node != req->buf_list.last);

                dmaca_chan_pop_head_req(chan);
                req->bytes_xfered += bytes_xfered;
                dmaca_chan_req_done(chan, req, 0);
        }

done:
        clear_bit(DMACA_CHAN_ACTIVE, &chan->flags);
        dmaca_chan_process_queue(chan);
}

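/*
 * Block-complete handling: a descriptor in the middle of the chain
 * finished. Use the current LLP register to find out how far the hardware
 * has progressed and retire all buffers whose descriptors have already
 * been consumed. If the channel stopped altogether, fall through to
 * dmaca_chan_all_done().
 */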
static void dmaca_chan_scan_queue(struct dmaca_channel *chan)
{
        struct dmac_request *req;
        size_t bytes_xfered;
        phys_addr_t llp;

        dmaca_write_reg(CLEAR_BLOCK, chan->mask);

        llp = dmaca_chan_read_reg(chan, LLP);

        /* The channel disables itself once the whole chain is done */
        if (!dmaca_test_chan_bit(chan, CH_EN)) {
                dmaca_chan_all_done(chan);
                return;
        }

        dbg_printf("dmaca ch%u scan queue: LLP=%08lx\n",
                        dmaca_chan_id(chan), llp);

        assert(!slist_is_empty(&chan->req_queue));
        assert(!slist_is_empty(&chan->buf_queue));

        req = dmaca_chan_peek_head_req(chan);
        bytes_xfered = 0;
        while (1) {
                struct dmaca_hw_desc *desc;
                struct buffer *buf;
                struct buffer *buf_next;

                buf = dmaca_chan_peek_head_buf(chan);
                if (slist_node_is_last(&chan->buf_queue, &buf->node))
                        break;

                buf_next = slist_entry(&buf->node.next, struct buffer, node);
                if (!buf_next->dma_desc)
                        break;

                desc = buf->dma_desc;
                if (desc->llp == llp)
                        break;

                slist_pop_head_node(&chan->buf_queue);
                req->bytes_xfered += buf->len;
                dmaca_chan_buf_done(chan, buf);

                if (slist_node_is_last(&req->buf_list, &buf->node)) {
                        slist_pop_head_node(&chan->req_queue);
                        dmaca_chan_req_done(chan, req, 0);
                        req = dmaca_chan_peek_head_req(chan);
                }
        }
}

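/*
 * Error handling: the controller flagged an error response on this
 * channel. Retire the buffers that completed before the failure,
 * acknowledge and mask the channel's interrupts, and fail the request
 * that was in flight.
 */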
static void dmaca_chan_error(struct dmaca_channel *chan)
{
        struct dmac_request *req;

        dbg_printf("dmaca ch%u error\n", dmaca_chan_id(chan));
        dbg_printf(" SAR %08x DAR %08x\n", dmaca_chan_read_reg(chan, SAR),
                        dmaca_chan_read_reg(chan, DAR));

        /* Retire everything that completed before the failure */
        dmaca_chan_scan_queue(chan);

        assert(!slist_is_empty(&chan->req_queue));
        assert(!slist_is_empty(&chan->buf_queue));
        assert(!dmaca_test_chan_bit(chan, CH_EN));

        /* The request at the head of the queue is the one that failed */
        req = dmaca_chan_pop_head_req(chan);

        /* Acknowledge and mask the channel's interrupts */
        dmaca_write_reg(CLEAR_TFR, chan->mask);
        dmaca_write_reg(CLEAR_BLOCK, chan->mask);
        dmaca_write_reg(CLEAR_ERR, chan->mask);
        dmaca_clear_chan_bit(chan, MASK_TFR);
        dmaca_clear_chan_bit(chan, MASK_BLOCK);
        dmaca_clear_chan_bit(chan, MASK_ERR);

        /* The channel is no longer running */
        clear_bit(DMACA_CHAN_ACTIVE, &chan->flags);

        /*
         * Complete the failed request. The error response most likely
         * means the controller was given an address it cannot access.
         */
        dmaca_chan_req_done(chan, req, -STATUS_BAD_ADDRESS);

        dbg_printf("dmaca ch%u error recovery complete\n", dmaca_chan_id(chan));
}

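/*
 * Top-level interrupt handler shared by all channels. Errors are handled
 * first, then completed transfers, then completed blocks, so a channel is
 * never advanced past an error condition.
 */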
static void dmaca_interrupt(void *data)
{
        unsigned int i;
        uint32_t status_tfr;
        uint32_t status_block;
        uint32_t status_err;

        status_block = dmaca_read_reg(STATUS_BLOCK);
        status_tfr = dmaca_read_reg(STATUS_TFR);
        status_err = dmaca_read_reg(STATUS_ERR);

        dbg_printf("dmaca interrupt: %x/%x/%x\n", status_block,
                        status_tfr, status_err);

        for (i = 0; i < NR_CHANNELS; i++) {
                struct dmaca_channel *chan;

                chan = &dmaca_channel[i];
                if (status_err & chan->mask)
                        dmaca_chan_error(chan);
                else if (status_tfr & chan->mask)
                        dmaca_chan_all_done(chan);
                else if (status_block & chan->mask)
                        dmaca_chan_scan_queue(chan);
        }
}
DEFINE_IRQ_HANDLER(dmaca, dmaca_interrupt, 0);

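/*
 * One-time setup of a channel when it is allocated: initialize the queues,
 * locate the channel's register window, program the source/destination
 * peripheral IDs into CFGH, and clear any stale transfer parameters left
 * in the hardware.
 */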
static void dmaca_chan_init(struct dmaca_channel *chan, unsigned int index,
                enum dmac_periph_id rx_periph, enum dmac_periph_id tx_periph,
                phys_addr_t rx_reg_addr, phys_addr_t tx_reg_addr)
{
        uint32_t cfgh;

        build_assert(NR_CHANNELS < 8);
        assert(index < NR_CHANNELS);
        assert(rx_periph != DMAC_PERIPH_NONE || tx_periph != DMAC_PERIPH_NONE);

        slist_init(&chan->req_queue);
        slist_init(&chan->buf_queue);
        chan->regs = (void *)(DMACA_BASE + index * DMACA_CHAN_REGS_SIZE);
        chan->mask = 1 << index;
        chan->rx_reg_addr = rx_reg_addr;
        chan->tx_reg_addr = tx_reg_addr;

        chan->dch.submit_req = dmaca_chan_submit_req;
        chan->dch.reset = dmaca_chan_reset;
        chan->dch.max_buffer_size = 2048;

        cfgh = 0;
        if (rx_periph != DMAC_PERIPH_NONE)
                cfgh |= DMACA_CFGH_SRC_PER(dmaca_get_periph_id(rx_periph));
        if (tx_periph != DMAC_PERIPH_NONE)
                cfgh |= DMACA_CFGH_DST_PER(dmaca_get_periph_id(tx_periph));

        dmaca_chan_write_reg(chan, LLP, 0);
        dmaca_chan_write_reg(chan, CTLL, 0);
        dmaca_chan_write_reg(chan, CTLH, 0);
        dmaca_chan_write_reg(chan, CFGL, 0);
        dmaca_chan_write_reg(chan, CFGH, cfgh);

        set_bit(DMACA_CHAN_ENABLED, &chan->flags);
}

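/*
 * Hand out the first free channel and set it up for the given RX/TX
 * peripheral pair. Returns NULL when every channel is already allocated.
 *
 * Sketch of typical client usage. The peripheral ID, register address,
 * buffer and callback names below are hypothetical placeholders; the real
 * values come from the client driver:
 *
 *	struct dmac_channel *ch;
 *	struct dmac_request req;
 *
 *	ch = dmaca_controller.alloc_chan(&dmaca_controller,
 *			MY_RX_PERIPH_ID, DMAC_PERIPH_NONE,
 *			MY_RX_REG_ADDR, 0);
 *	if (!ch)
 *		return;
 *
 *	req.direction = DMA_FROM_DEVICE;
 *	req.reg_width = DMAC_REG_WIDTH_32BIT;
 *	req.burst_length = 0;
 *	req.req_done = my_rx_done;
 *	slist_init(&req.buf_list);
 *	slist_insert_tail(&req.buf_list, &my_buf->node);
 *	ch->submit_req(ch, &req);
 */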
struct dmac_channel *dmaca_alloc_channel(struct dma_controller *dmac,
                enum dmac_periph_id rx_periph, enum dmac_periph_id tx_periph,
                phys_addr_t rx_reg_addr, phys_addr_t tx_reg_addr)
{
        struct dmaca_channel *chan;
        unsigned int i;

        dbg_printf("dmaca alloc_channel: %u[%08lx]/%u[%08lx]\n",
                        rx_periph, rx_reg_addr, tx_periph, tx_reg_addr);

        assert(cpu_irq_is_enabled());

        for (i = 0; i < ARRAY_LEN(dmaca_channel); i++) {
                chan = &dmaca_channel[i];

                dbg_printf(" ch%u flags: 0x%lx\n", i, chan->flags);
                if (!atomic_test_and_set_bit(DMACA_CHAN_ALLOCATED,
                                        &chan->flags)) {
                        dmaca_chan_init(chan, i, rx_periph, tx_periph,
                                        rx_reg_addr, tx_reg_addr);
                        break;
                }
                chan = NULL;
        }

        return chan ? &chan->dch : NULL;
}

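/*
 * Release a channel. The caller must have completed or reset all
 * outstanding requests first; the assertions below enforce that the
 * queues are empty and the channel is idle.
 */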
void dmaca_free_channel(struct dma_controller *dmac,
                struct dmac_channel *dch)
{
        struct dmaca_channel *chan = dmaca_channel_of(dch);

        assert(slist_is_empty(&chan->req_queue));
        assert(slist_is_empty(&chan->buf_queue));
        assert(test_bit(DMACA_CHAN_ALLOCATED, &chan->flags));
        assert(test_bit(DMACA_CHAN_ENABLED, &chan->flags));
        assert(!test_bit(DMACA_CHAN_ACTIVE, &chan->flags));

        atomic_clear_bit(DMACA_CHAN_ENABLED, &chan->flags);
        atomic_clear_bit(DMACA_CHAN_ALLOCATED, &chan->flags);
}

struct dma_controller dmaca_controller = {
        .alloc_chan = dmaca_alloc_channel,
        .free_chan = dmaca_free_channel,
};

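/*
 * Controller-wide initialization: set up a coherent descriptor pool for
 * each channel from dmaca_desc_physmem_pool and install the shared
 * interrupt handler. The controller itself is only enabled once a channel
 * actually has work to do (see dmaca_controller_get()).
 */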
void dmaca_init(void)
{
        unsigned int i;

        /*
         * Give each channel its own pool of hardware descriptors, carved
         * out of physically addressable memory so that the controller can
         * follow the LLP chain directly.
         */
        for (i = 0; i < ARRAY_LEN(dmaca_channel); i++) {
                struct dmaca_channel *chan;

                chan = &dmaca_channel[i];
                dma_pool_init_coherent_physmem(&chan->desc_pool,
                                &dmaca_desc_physmem_pool,
                                CONFIG_DMACA_NR_DESCRIPTORS,
                                sizeof(struct dmaca_hw_desc), 2);
        }

        setup_irq_handler(DMACA_IRQ, dmaca, 0, &dmaca_controller);
}