00001
#include <assert.h>
#include <bitops.h>
#include <buffer.h>
#include <interrupt.h>
#include <io.h>
#include <irq_handler.h>
#include <status-codes.h>
#include <stdint.h>
#include <string.h>
#include <chip/memory-map.h>
#include <chip/irq-map.h>
#include <dmac/dma_controller.h>
#include <dmac/pdca.h>

#include "dmac_util.h"
#include "pdca_regs.h"
00056
/*
 * Number of PDCA channels this driver manages: the configured value
 * when CONFIG_PDCA_NR_CHANNELS is set (it must not exceed what the
 * chip actually provides), otherwise every channel the chip has.
 */
#ifdef CONFIG_PDCA_NR_CHANNELS
# if CONFIG_PDCA_NR_CHANNELS > CHIP_PDCA_NR_CHANNELS
#  error Too many PDCA channels requested
# endif
# define NR_CHANNELS CONFIG_PDCA_NR_CHANNELS
#else
# define NR_CHANNELS CHIP_PDCA_NR_CHANNELS
#endif
00065
/* Bit numbers used in pdca_channel::flags. */
enum pdca_chan_flag {
	PDCA_CHAN_ALLOCATED,	/* channel has been handed out to a client */
	PDCA_CHAN_ENABLED,	/* channel accepts new requests */
};
00073
/* Driver-private state for one PDCA channel. */
struct pdca_channel {
	struct slist req_queue;		/* queued dmac_requests, head is active */
	struct buffer *current_buf;	/* buffer the hardware is transferring */
	struct buffer *next_buf;	/* buffer loaded into the reload regs */
	void *regs;			/* base of this channel's register window */
	unsigned int rx_pid;		/* peripheral ID used for DMA_FROM_DEVICE */
	unsigned int tx_pid;		/* peripheral ID used for DMA_TO_DEVICE */
	unsigned long flags;		/* enum pdca_chan_flag bits */
	struct dmac_channel dch;	/* generic channel handed to clients */
};
00084
/* One entry per managed channel, indexed by channel ID. */
static struct pdca_channel pdca_channel[NR_CHANNELS];
00086
00092 static struct pdca_channel *pdca_channel_of(struct dmac_channel *ch)
00093 {
00094 return container_of(ch, struct pdca_channel, dch);
00095 }
00096
00097 static unsigned int pdca_chan_id(struct pdca_channel *chan)
00098 {
00099
00100 return ((unsigned int)chan->regs >> 6) & 0x1f;
00101 }
00102
/*
 * Quiesce a channel: mask all of its interrupt sources, then disable
 * the transfer and clear any latched error condition.
 */
static void pdca_chan_idle(struct pdca_channel *chan)
{
	pdca_chan_write_reg(chan, IDR, ~0UL);
	pdca_chan_write_reg(chan, CR, PDCA_CR_TDIS | PDCA_CR_ECLR);
}
00108
/*
 * Stop a channel dead: zero both transfer counters and idle it.  The
 * reload counter (TCRR) is cleared before the current counter (TCR) —
 * presumably so the hardware cannot auto-reload a transfer in between;
 * keep this ordering.
 */
static void pdca_chan_stop(struct pdca_channel *chan)
{
	pdca_chan_write_reg(chan, TCRR, 0);
	pdca_chan_write_reg(chan, TCR, 0);
	pdca_chan_idle(chan);
}
00115
00116 static void pdca_chan_req_done(struct pdca_channel *chan,
00117 struct dmac_request *req, int status)
00118 {
00119 req->status = status;
00120 if (req->req_done)
00121 req->req_done(&chan->dch, req);
00122 }
00123
/*
 * Handle a transfer-error interrupt: log the faulting state, stop the
 * channel and fail the request at the head of the queue with
 * -STATUS_BAD_ADDRESS.
 *
 * NOTE(review): @buf is currently unused here — possibly a leftover
 * from removed accounting code; confirm before relying on it.
 */
static void pdca_chan_error(struct pdca_channel *chan, struct buffer *buf)
{
	struct dmac_request *req;

	dbg_printf("pdca ch%u error: MAR %08x TCR %u\n",
			pdca_chan_id(chan), pdca_chan_read_reg(chan, MAR),
			pdca_chan_read_reg(chan, TCR));

	/*
	 * Stop the hardware before touching the queue so no further
	 * interrupts arrive for this transfer.
	 */
	pdca_chan_stop(chan);

	req = slist_pop_head(&chan->req_queue, struct dmac_request, node);
	pdca_chan_req_done(chan, req, -STATUS_BAD_ADDRESS);
}
00142
/*
 * Per-channel interrupt handler: advance the buffer chain of the
 * request at the head of the queue, complete finished requests, and
 * program the hardware (current + reload registers) for whatever
 * should run next.  Runs with this channel's pending interrupt status
 * latched in ISR.
 */
static void pdca_chan_interrupt(struct pdca_channel *chan)
{
	struct dmac_request *req;
	struct buffer *buf;
	struct buffer *buf_next;
	uint32_t status;

	/* An interrupt with nothing queued would be a driver bug. */
	assert(!slist_is_empty(&chan->req_queue));

	status = pdca_chan_read_reg(chan, ISR);
	req = slist_peek_head(&chan->req_queue, struct dmac_request, node);
	buf = chan->current_buf;
	buf_next = chan->next_buf;

	if (buf) {
		/*
		 * RCZ (reload counter zero): the hardware switched from
		 * the current buffer to the pre-loaded next one.  Account
		 * the finished buffer and make the next one current.
		 */
		if (buf_next && (status & PDCA_INT_RCZ)) {
			req->bytes_xfered += buf->len;
			buf = buf_next;
			buf_next = NULL;
		}

		/*
		 * Transfer error: abort the whole head request.  The
		 * assert documents the assumption that TERR and TRC are
		 * never reported together.
		 */
		if (status & PDCA_INT_TERR) {
			assert(!(status & PDCA_INT_TRC));
			pdca_chan_error(chan, buf);
			return;
		}

		/* Transfer complete: the current buffer finished. */
		if (status & PDCA_INT_TRC) {
			req->bytes_xfered += buf->len;
			if (slist_node_is_last(&req->buf_list, &buf->node)) {
				/*
				 * Last buffer of the request: complete it
				 * and move on to the next queued request,
				 * if any.
				 */
				slist_pop_head_node(&chan->req_queue);
				pdca_chan_req_done(chan, req, STATUS_OK);
				req = slist_peek_head(&chan->req_queue,
						struct dmac_request, node);
				buf = NULL;
			} else {
				buf = slist_entry(buf->node.next,
						struct buffer, node);
			}
		}
	}

	/* Nothing left to do: quiesce the channel. */
	if (slist_is_empty(&chan->req_queue)) {
		pdca_chan_idle(chan);
		goto out;
	}

	/*
	 * Starting a fresh request: program the peripheral selection for
	 * its direction and the register width before loading buffers.
	 */
	if (!buf) {
		assert(req->reg_width < 2);

		switch (req->direction) {
		case DMA_FROM_DEVICE:
			assert(chan->rx_pid < PDCA_NR_PERIPH_IDS);
			pdca_chan_write_reg(chan, PSR,
					PDCA_PSR_PID(chan->rx_pid));
			break;
		case DMA_TO_DEVICE:
			assert(chan->tx_pid < PDCA_NR_PERIPH_IDS);
			pdca_chan_write_reg(chan, PSR,
					PDCA_PSR_PID(chan->tx_pid));
			break;
		default:
			unhandled_case(req->direction);
			break;
		}

		pdca_chan_write_reg(chan, MR, PDCA_MR_SIZE(req->reg_width));
		buf = slist_peek_head(&req->buf_list, struct buffer, node);
	}

	/*
	 * NOTE(review): at this point the reload slot is expected to be
	 * free — RCZ in @status indicating the hardware consumed it.
	 */
	assert(status & PDCA_INT_RCZ);

	/* If the current transfer finished, load @buf as the new current. */
	if (status & PDCA_INT_TRC) {
		pdca_chan_write_reg(chan, MAR, buf->addr.phys);
		pdca_chan_write_reg(chan, TCR, buf->len >> req->reg_width);
	}

	if (!slist_node_is_last(&req->buf_list, &buf->node)) {
		/*
		 * Pre-load the following buffer into the reload registers
		 * and ask for an interrupt when the hardware takes it.
		 */
		buf_next = slist_entry(buf->node.next, struct buffer, node);
		pdca_chan_write_reg(chan, MARR, buf_next->addr.phys);
		pdca_chan_write_reg(chan, TCRR,
				buf_next->len >> req->reg_width);
		pdca_chan_write_reg(chan, IER, PDCA_INT_RCZ);
	} else {
		/*
		 * @buf is the last buffer of its request: nothing to
		 * reload, so RCZ is uninteresting until the next request
		 * is programmed.
		 */
		assert(!buf_next);
		pdca_chan_write_reg(chan, IDR, PDCA_INT_RCZ);
	}

out:
	chan->current_buf = buf;
	chan->next_buf = buf_next;
}
00254
00255 static void pdca_interrupt(void *data)
00256 {
00257 unsigned int chan_id;
00258 unsigned long pending_mask;
00259
00260 pending_mask = get_irq_group_requests(PDCA_IRQ);
00261 assert(pending_mask);
00262
00263 while (pending_mask) {
00264 chan_id = __fls(pending_mask);
00265 clear_bit(chan_id, &pending_mask);
00266 pdca_chan_interrupt(&pdca_channel[chan_id]);
00267 }
00268 }
00269 DEFINE_IRQ_HANDLER(pdca, pdca_interrupt, 0);
00270
/*
 * dmac_channel::submit_req implementation: append @req to the channel's
 * queue and enable the transfer.  Must be called with CPU interrupts
 * enabled; the queue is protected against the PDCA interrupt handler
 * by a short irq-off critical section.
 *
 * If the channel has been disabled (e.g. by a concurrent reset), the
 * request is failed immediately with -STATUS_IO_ERROR instead of being
 * queued.
 */
static void pdca_chan_submit_req(struct dmac_channel *dch,
		struct dmac_request *req)
{
	struct pdca_channel *chan = pdca_channel_of(dch);
	bool queued = true;

	assert(cpu_irq_is_enabled());
	assert(test_bit(PDCA_CHAN_ALLOCATED, &chan->flags));
	dmac_verify_req(req);

	/* Fresh request: nothing transferred yet. */
	req->bytes_xfered = 0;
	req->status = -STATUS_IN_PROGRESS;

	cpu_irq_disable();
	if (likely(test_bit(PDCA_CHAN_ENABLED, &chan->flags))) {
		slist_insert_tail(&chan->req_queue, &req->node);
		/*
		 * NOTE(review): only TEN and the interrupt enables are
		 * written here; the interrupt handler programs the actual
		 * buffer addresses.  This appears to rely on TRC being
		 * raised while TCR is zero — confirm against the
		 * datasheet.
		 */
		pdca_chan_write_reg(chan, CR, PDCA_CR_TEN);
		pdca_chan_write_reg(chan, IER, PDCA_INT_TRC | PDCA_INT_TERR);
	} else {
		queued = false;
	}
	cpu_irq_enable();

	/* Run the failure callback outside the irq-off section. */
	if (!queued)
		pdca_chan_req_done(chan, req, -STATUS_IO_ERROR);
}
00304
/*
 * dmac_channel::reset implementation: stop the channel, fail every
 * queued request with -STATUS_IO_ERROR, then re-arm the channel for
 * new submissions.  Must be called with CPU interrupts enabled.
 */
static void pdca_chan_reset(struct dmac_channel *dch)
{
	struct pdca_channel *chan = pdca_channel_of(dch);
	struct dmac_request *req;

	assert(cpu_irq_is_enabled());

	/*
	 * Disable the channel so the submit path backs off, and silence
	 * the hardware, all inside one irq-off critical section.
	 */
	cpu_irq_disable();
	clear_bit(PDCA_CHAN_ENABLED, &chan->flags);
	pdca_chan_stop(chan);
	cpu_irq_enable();

	chan->current_buf = NULL;
	chan->next_buf = NULL;

	/* Fail everything that was still queued. */
	while (!slist_is_empty(&chan->req_queue)) {
		req = slist_pop_head(&chan->req_queue,
				struct dmac_request, node);
		pdca_chan_req_done(chan, req, -STATUS_IO_ERROR);
	}

	/* Accept submissions again. */
	set_bit(PDCA_CHAN_ENABLED, &chan->flags);
}
00331
/*
 * One-time set-up of a freshly claimed channel.  The caller must
 * already own the channel (PDCA_CHAN_ALLOCATED set); on return the
 * channel is quiesced and marked ENABLED.
 */
static void pdca_chan_init(struct pdca_channel *chan, unsigned int index,
		enum dmac_periph_id rx_periph, enum dmac_periph_id tx_periph)
{
	/*
	 * pdca_interrupt() keeps the pending channels in one unsigned
	 * long bitmask, so the channel count must fit.
	 */
	build_assert(NR_CHANNELS < 32);
	assert(test_bit(PDCA_CHAN_ALLOCATED, &chan->flags));
	assert(index < NR_CHANNELS);
	assert(rx_periph != DMAC_PERIPH_NONE || tx_periph != DMAC_PERIPH_NONE);

	chan->current_buf = NULL;
	chan->next_buf = NULL;

	/*
	 * Reset all flags except ALLOCATED, which we own.  A plain
	 * (non-atomic) store is fine: no other party touches the channel
	 * until ENABLED is set at the end of this function.
	 */
	chan->flags = 1 << PDCA_CHAN_ALLOCATED;

	slist_init(&chan->req_queue);
	chan->regs = (void *)(PDCA_BASE + index * PDCA_CHAN_REGS_SIZE);
	chan->rx_pid = pdca_get_periph_id(rx_periph);
	chan->tx_pid = pdca_get_periph_id(tx_periph);

	/* Hook up the generic dmac interface. */
	chan->dch.submit_req = pdca_chan_submit_req;
	chan->dch.reset = pdca_chan_reset;
	/*
	 * NOTE(review): 32768 presumably reflects the range of the
	 * hardware transfer counter — confirm against the datasheet.
	 */
	chan->dch.max_buffer_size = 32768;

	/* Make sure the hardware starts out quiet. */
	pdca_chan_write_reg(chan, IDR, ~0UL);
	pdca_chan_write_reg(chan, CR, PDCA_CR_ECLR | PDCA_CR_TDIS);

	/* Open for business. */
	set_bit(PDCA_CHAN_ENABLED, &chan->flags);
}
00368
00369 struct dmac_channel *pdca_alloc_channel(struct dma_controller *dmac,
00370 enum dmac_periph_id rx_periph, enum dmac_periph_id tx_periph,
00371 phys_addr_t rx_reg_addr, phys_addr_t tx_reg_addr)
00372 {
00373 struct pdca_channel *chan;
00374 unsigned int i;
00375
00376
00377 for (i = 0; i < ARRAY_LEN(pdca_channel); i++) {
00378 chan = &pdca_channel[i];
00379
00380 if (!atomic_test_and_set_bit(PDCA_CHAN_ALLOCATED,
00381 &chan->flags)) {
00382
00383 pdca_chan_init(chan, i, rx_periph, tx_periph);
00384 break;
00385 }
00386 chan = NULL;
00387 }
00388
00389 return chan ? &chan->dch : NULL;
00390 }
00391
/*
 * Release a channel previously handed out by pdca_alloc_channel().
 * The caller must have drained it first: the request queue must be
 * empty and the hardware transfer disabled.
 */
void pdca_free_channel(struct dma_controller *dmac,
		struct dmac_channel *dch)
{
	struct pdca_channel *chan = pdca_channel_of(dch);

	assert(slist_is_empty(&chan->req_queue));
	assert(test_bit(PDCA_CHAN_ALLOCATED, &chan->flags));
	assert(test_bit(PDCA_CHAN_ENABLED, &chan->flags));
	assert(!(pdca_chan_read_reg(chan, SR) & PDCA_SR_TEN));

	/*
	 * Clear ENABLED before ALLOCATED: clearing ALLOCATED is what
	 * makes the channel claimable by pdca_alloc_channel() again.
	 */
	atomic_clear_bit(PDCA_CHAN_ENABLED, &chan->flags);
	atomic_clear_bit(PDCA_CHAN_ALLOCATED, &chan->flags);
}
00407
/* Generic DMA-controller interface exported by this driver. */
struct dma_controller pdca_controller = {
	.alloc_chan = pdca_alloc_channel,
	.free_chan = pdca_free_channel,
};
00412
/* Driver entry point: install the PDCA group interrupt handler. */
void pdca_init(void)
{
	setup_irq_handler(PDCA_IRQ, pdca, 0, &pdca_controller);
}