00001
00044 #include <buffer.h>
00045 #include <debug.h>
00046 #include <malloc.h>
00047 #include <string.h>
00048 #include <types.h>
00049 #include <workqueue.h>
00050 #include <app/config_dmapool.h>
00051 #include <app/workqueue.h>
00052 #include <block/device.h>
00053 #include <block/fill.h>
00054
/* Each fill buffer is one "large" object from the DMA pool. */
#define BLOCK_FILL_BUF_SIZE CONFIG_DMAPOOL_LARGE_OBJ_SIZE
/* Use at most half of the pool's large objects for a single fill request. */
#define BLOCK_FILL_MAX_BUFFERS (CONFIG_DMAPOOL_NR_LARGE_OBJS / 2)
00057
/*
 * State of one in-flight fill operation: a set of pre-filled DMA buffers
 * that is written repeatedly to consecutive device blocks until nr_blocks
 * blocks have been covered.
 */
struct block_fill_request {
	uint32_t blocks_remaining;	/* blocks not yet written to the device */
	uint32_t blocks_allocated;	/* blocks covered by the current buffer set */
	uint32_t nr_blocks;		/* total blocks requested at alloc time */
	block_addr_t lba;		/* LBA of the next write */
	struct block_device *bdev;	/* target device */
	struct block_request *breq;	/* underlying block-layer request (reused) */
	struct workqueue_item work;	/* continuation run after each write */
	block_fill_callback_t completion_callback;	/* user callback; may be NULL */
	void *context;			/* opaque pointer handed to the callback */
};
00069
00070 static void block_fill_complete(struct block_fill_request *req, int status)
00071 {
00072 if (req->completion_callback)
00073 req->completion_callback(req, status,
00074 req->nr_blocks - req->blocks_remaining,
00075 req->context);
00076 }
00077
00078 static void block_fill_error(void *data)
00079 {
00080 struct block_fill_request *req = data;
00081 struct block_request *breq = req->breq;
00082
00083 req->blocks_remaining -= breq->bytes_xfered / req->bdev->block_size;
00084 block_fill_complete(req, breq->status);
00085 }
00086
00087 static void block_fill_worker(void *data)
00088 {
00089 struct block_fill_request *req = data;
00090 struct block_request *breq = req->breq;
00091 uint32_t blocks_remaining;
00092 uint32_t blocks_allocated;
00093 struct slist buf_list;
00094
00095 blocks_remaining = req->blocks_remaining;
00096 blocks_allocated = req->blocks_allocated;
00097 assert(blocks_remaining >= blocks_allocated);
00098 blocks_remaining -= blocks_allocated;
00099 req->blocks_remaining = blocks_remaining;
00100
00101 dbg_verbose("block_fill: %u / %u blocks done\n",
00102 req->nr_blocks - blocks_remaining, req->nr_blocks);
00103
00104 if (blocks_remaining == 0) {
00105 block_fill_complete(req, 0);
00106 return;
00107 }
00108
00109 req->lba += blocks_allocated;
00110
00111 slist_init(&buf_list);
00112 slist_move_to_tail(&buf_list, &breq->buf_list);
00113 if (blocks_remaining < blocks_allocated) {
00114 unsigned int blocks_per_buf;
00115 unsigned int block_size;
00116
00117 block_size = req->bdev->block_size;
00118 blocks_per_buf = BLOCK_FILL_BUF_SIZE / block_size;
00119
00120
00121 while (blocks_allocated - blocks_per_buf >= blocks_remaining) {
00122 struct buffer *buf;
00123
00124 buf = slist_pop_head(&buf_list, struct buffer, node);
00125 buffer_dma_free(buf, BLOCK_FILL_BUF_SIZE);
00126 blocks_allocated -= blocks_per_buf;
00127 }
00128
00129
00130 if (blocks_allocated > blocks_remaining) {
00131 struct buffer *buf;
00132
00133 buf = slist_peek_tail(&buf_list, struct buffer, node);
00134 buffer_resize(buf, BLOCK_FILL_BUF_SIZE
00135 - block_size * (blocks_allocated
00136 - blocks_remaining));
00137 }
00138
00139 req->blocks_allocated = blocks_allocated;
00140 }
00141
00142 block_prepare_req(req->bdev, req->breq, req->lba, blocks_allocated,
00143 BLK_OP_WRITE);
00144 slist_move_to_tail(&req->breq->buf_list, &buf_list);
00145 block_submit_req(req->bdev, req->breq);
00146 }
00147
00148 static void block_fill_write_done(struct block_device *bdev,
00149 struct block_request *breq)
00150 {
00151 struct block_fill_request *req = breq->context;
00152
00153 assert(bdev == req->bdev);
00154 assert(breq == req->breq);
00155
00156
00157 if (breq->status)
00158 workqueue_init_item(&req->work, block_fill_error, req);
00159
00160 workqueue_add_item(&block_workqueue, &req->work);
00161 }
00162
00163 struct block_fill_request *block_fill_alloc(struct block_device *bdev,
00164 block_addr_t first_lba, uint32_t nr_blocks, void *context)
00165 {
00166 struct block_fill_request *req;
00167 struct block_request *breq;
00168 unsigned int block_size;
00169 uint32_t blocks_allocated;
00170 uint32_t blocks_remaining;
00171 struct slist buf_list;
00172 unsigned int i;
00173
00174 block_size = bdev->block_size;
00175
00176
00177
00178
00179
00180 if (BLOCK_FILL_BUF_SIZE < block_size)
00181 return NULL;
00182 if (BLOCK_FILL_BUF_SIZE % block_size)
00183 return NULL;
00184
00185 req = malloc(sizeof(struct block_fill_request));
00186 if (unlikely(!req))
00187 return NULL;
00188 memset(req, 0, sizeof(struct block_fill_request));
00189
00190 breq = block_alloc_request(bdev);
00191 if (unlikely(!breq))
00192 goto err_alloc_breq;
00193
00194
00195
00196
00197
00198 slist_init(&buf_list);
00199 blocks_remaining = nr_blocks;
00200 for (i = 0; i < BLOCK_FILL_MAX_BUFFERS; i++) {
00201 struct buffer *buf;
00202 uint32_t buf_blocks;
00203
00204 buf = buffer_dma_alloc(BLOCK_FILL_BUF_SIZE);
00205 if (!buf)
00206 break;
00207
00208 buf_blocks = BLOCK_FILL_BUF_SIZE / block_size;
00209 slist_insert_tail(&buf_list, &buf->node);
00210 if (buf_blocks >= blocks_remaining) {
00211 if (buf_blocks > blocks_remaining) {
00212 buf_blocks = blocks_remaining;
00213 buffer_resize(buf, buf_blocks * block_size);
00214 }
00215 blocks_remaining = 0;
00216 break;
00217 }
00218
00219 blocks_remaining -= buf_blocks;
00220 }
00221
00222 blocks_allocated = nr_blocks - blocks_remaining;
00223 if (unlikely(!blocks_allocated))
00224
00225 goto err_alloc_buf;
00226
00227
00228
00229
00230
00231 block_prepare_req(bdev, breq, first_lba, blocks_allocated,
00232 BLK_OP_WRITE);
00233 breq->req_done = block_fill_write_done;
00234 breq->context = req;
00235 slist_move_to_tail(&breq->buf_list, &buf_list);
00236
00237 req->bdev = bdev;
00238 req->breq = breq;
00239 req->lba = first_lba;
00240 req->nr_blocks = nr_blocks;
00241 req->blocks_remaining = nr_blocks;
00242 req->blocks_allocated = blocks_allocated;
00243 req->context = context;
00244
00245 return req;
00246
00247 err_alloc_buf:
00248 block_free_request(bdev, breq);
00249 err_alloc_breq:
00250 free(req);
00251 return NULL;
00252 }
00253
00254 void block_fill_free(struct block_fill_request *req)
00255 {
00256 while (!slist_is_empty(&req->breq->buf_list)) {
00257 struct buffer *buf;
00258
00259 buf = slist_pop_head(&req->breq->buf_list, struct buffer, node);
00260
00261
00262 buffer_dma_free(buf, BLOCK_FILL_BUF_SIZE);
00263 }
00264
00265 block_free_request(req->bdev, req->breq);
00266 free(req);
00267 }
00268
00269 void block_fill_set_byte_pattern(struct block_fill_request *req,
00270 uint8_t pattern)
00271 {
00272 struct buffer *buf;
00273
00274 assert(req);
00275 assert(!slist_is_empty(&req->breq->buf_list));
00276
00277 blk_req_for_each_buffer(req->breq, buf)
00278 memset(buf->addr.ptr, pattern, buf->len);
00279 }
00280
00281 void block_fill_set_completion_callback(struct block_fill_request *req,
00282 block_fill_callback_t callback)
00283 {
00284 assert(req);
00285
00286 req->completion_callback = callback;
00287 }
00288
00289 void block_fill_submit(struct block_fill_request *req)
00290 {
00291 assert(req);
00292 assert(req->bdev);
00293 assert(req->breq);
00294 assert(!slist_is_empty(&req->breq->buf_list));
00295
00296 workqueue_init_item(&req->work, block_fill_worker, req);
00297
00298 block_submit_req(req->bdev, req->breq);
00299 }