00001
00052 #include <assert.h>
00053 #include <atomic.h>
00054 #include <byteorder.h>
00055 #include <dmapool.h>
00056 #include <interrupt.h>
00057 #include <physmem.h>
00058 #include <status-codes.h>
00059 #include <string.h>
00060 #include <types.h>
00061 #include <util.h>
00062 #include <block/device.h>
00063 #include <scsi/cdb.h>
00064 #include <scsi/spc_protocol.h>
00065 #include <scsi/sbc_protocol.h>
00066 #include <usb/function_core.h>
00067 #include <usb/msc_function.h>
00068 #include <usb/msc_protocol.h>
00069 #include <usb/request.h>
00070 #include <usb/usb_protocol.h>
00071 #include <usb/udc.h>
00072
00073 #include <app/config_usb.h>
00074
00075 #ifndef CONFIG_MSC_FS_BULK_EP_SIZE
00076 # define CONFIG_MSC_FS_BULK_EP_SIZE 64
00077 #endif
00078
00079 #define MSC_DATA_BUFFER_SIZE CONFIG_DMAPOOL_LARGE_OBJ_SIZE
00080 #define MSC_MAX_NR_BUFFERS (CONFIG_DMAPOOL_NR_LARGE_OBJS / 8)
00081 #define MSC_MAX_DATA_LEN (MSC_MAX_NR_BUFFERS * MSC_DATA_BUFFER_SIZE)
00082
00084 #ifdef CONFIG_USE_AES
00085
00086
00087 # define MSC_MAX_NR_SEGS (1)
00088 #else
00089 # define MSC_MAX_NR_SEGS (2)
00090 #endif
00091
00092
00093 #define MSC_VPD_SERIAL_BUF_SIZE (MSC_MAX_SERIAL_LEN + SCSI_VPD_HEADER_SIZE)
00094
00095
00096
00097
00098
00099
00100
00101
00102
00103 #define MSC_VPD_ID_BUF_SIZE (SCSI_VPD_HEADER_SIZE \
00104 + SCSI_VPD_ID_HEADER_SIZE \
00105 + 8 + 16 \
00106 + MSC_MAX_SERIAL_LEN)
00107
/*
 * Interface descriptor followed by its two bulk endpoint descriptors,
 * laid out contiguously so the whole block can be copied verbatim into
 * a configuration descriptor response.
 */
struct msc_bulk_iface_block {
	struct usb_interface_descriptor iface;
	struct usb_endpoint_descriptor in_ep;
	struct usb_endpoint_descriptor out_ep;
};
00113
/* Per-instance state of the MSC (mass storage class) USB function. */
struct msc_func {
	struct usb_func_iface iface;	/* generic function-interface base */
	struct udc *udc;		/* controller this function runs on */
	struct block_device *bdev;	/* backing block device */
	usb_ep_id_t bulk_in_ep;		/* bulk IN endpoint (0 when unbound) */
	usb_ep_id_t bulk_out_ep;	/* bulk OUT endpoint */
	uint8_t *sense_data;		/* fixed-format SCSI sense buffer */
	phys_addr_t sense_data_phys;	/* DMA address of sense_data */
	uint32_t blocks_queued;		/* blocks submitted so far this xfer */
	uint32_t blocks_total;		/* total blocks in current xfer */
	uint32_t first_lba;		/* first LBA of current xfer */
	atomic_t blk_segs_pending;	/* block segments in flight */
	atomic_t usb_reqs_pending;	/* USB data requests in flight */
	struct usb_msc_cbw *cbw;	/* Command Block Wrapper buffer */
	struct usb_msc_csw *csw;	/* Command Status Wrapper buffer */
	struct usb_request *cbw_csw_req;	/* request recycled between CBW rx and CSW tx */
	struct block_request *block_req;	/* request reused for block I/O */
	uint16_t busy_asc;		/* ASC(Q) to report while not_ready */
	void (*busy_cb)(void *);	/* invoked once the queue drains while not_ready */
	void *busy_cb_data;		/* argument for busy_cb */
	bool queue_locked;		/* read-worker re-entry guard */
	bool not_ready;			/* unit reports NOT READY */
	bool xfer_in_progress;		/* a command is being processed */
};
00164
00165 static inline struct msc_func *msc_func_of(struct usb_func_iface *iface)
00166 {
00167 return container_of(iface, struct msc_func, iface);
00168 }
00169
/*
 * MODE SENSE(6) and MODE SENSE(10) parameter headers share one buffer;
 * the union is sized for the larger of the two.
 */
union msc_mode_param_header {
	struct scsi_mode_param_header6 h6;
	struct scsi_mode_param_header10 h10;
};
/* Worst-case mode parameter buffer: header + one short-LBA block descriptor. */
#define MSC_MODE_PARAM_HDR_BUF_LEN \
	(sizeof(union msc_mode_param_header) \
		+ sizeof(struct sbc_slba_block_desc))
00177
/* Full-speed interface/endpoint descriptors for the bulk-only interface. */
static const struct msc_bulk_iface_block msc_bulk_fs_iface = {
	.iface = {
		.bLength = sizeof(struct usb_interface_descriptor),
		.bDescriptorType = USB_DT_INTERFACE,
		.bInterfaceNumber = CONFIG_MSC_BULK_INTERFACE_ID,
		.bAlternateSetting = 0,
		.bNumEndpoints = 2,
		.bInterfaceClass = USB_INTERFACE_CLASS_MSC,
		/* SCSI transparent command set */
		.bInterfaceSubClass = USB_MSC_SUBCLASS_TRANSPARENT,
		/* Bulk-Only Transport */
		.bInterfaceProtocol = USB_MSC_PROTOCOL_BULK,
	},
	.in_ep = {
		.bLength = sizeof(struct usb_endpoint_descriptor),
		.bDescriptorType = USB_DT_ENDPOINT,
		.bEndpointAddress = USB_DIR_IN | CONFIG_MSC_BULK_IN_EP,
		.bmAttributes = USB_EP_XFER_BULK,
		.wMaxPacketSize = LE16(CONFIG_MSC_FS_BULK_EP_SIZE),
	},
	.out_ep = {
		.bLength = sizeof(struct usb_endpoint_descriptor),
		.bDescriptorType = USB_DT_ENDPOINT,
		.bEndpointAddress = USB_DIR_OUT | CONFIG_MSC_BULK_OUT_EP,
		.bmAttributes = USB_EP_XFER_BULK,
		.wMaxPacketSize = LE16(CONFIG_MSC_FS_BULK_EP_SIZE),
	},
};
00206
#ifdef CONFIG_UDC_HIGH_SPEED
/*
 * High-speed variant: identical to the full-speed block except for the
 * 512-byte bulk maximum packet size mandated for high speed.
 */
static const struct msc_bulk_iface_block msc_bulk_hs_iface = {
	.iface = {
		.bLength = sizeof(struct usb_interface_descriptor),
		.bDescriptorType = USB_DT_INTERFACE,
		.bInterfaceNumber = CONFIG_MSC_BULK_INTERFACE_ID,
		.bAlternateSetting = 0,
		.bNumEndpoints = 2,
		.bInterfaceClass = USB_INTERFACE_CLASS_MSC,
		/* SCSI transparent command set */
		.bInterfaceSubClass = USB_MSC_SUBCLASS_TRANSPARENT,
		/* Bulk-Only Transport */
		.bInterfaceProtocol = USB_MSC_PROTOCOL_BULK,
	},
	.in_ep = {
		.bLength = sizeof(struct usb_endpoint_descriptor),
		.bDescriptorType = USB_DT_ENDPOINT,
		.bEndpointAddress = USB_DIR_IN | CONFIG_MSC_BULK_IN_EP,
		.bmAttributes = USB_EP_XFER_BULK,
		.wMaxPacketSize = LE16(512),
	},
	.out_ep = {
		.bLength = sizeof(struct usb_endpoint_descriptor),
		.bDescriptorType = USB_DT_ENDPOINT,
		.bEndpointAddress = USB_DIR_OUT | CONFIG_MSC_BULK_OUT_EP,
		.bmAttributes = USB_EP_XFER_BULK,
		.wMaxPacketSize = LE16(512),
	},
};
#endif
00237
/* Standard INQUIRY response: connected direct-access device, SPC-3. */
static const struct scsi_inquiry_data msc_inquiry_data = {
	.pq_pdt = SCSI_INQ_PQ_CONNECTED | SCSI_INQ_DT_DIR_ACCESS,
	.flags1 = 0,
	.version = SCSI_INQ_VER_SPC3,
	.flags3 = SCSI_INQ_HISUP | SCSI_INQ_RSP_SPC2,
	.addl_len = SCSI_INQ_ADDL_LEN(sizeof(msc_inquiry_data)),
	.vendor_id = { CONFIG_MSC_INQ_VENDOR_ID },
	.product_id = { CONFIG_MSC_INQ_PRODUCT_ID },
	.product_rev = { CONFIG_MSC_INQ_PRODUCT_VERSION },
};
00248
00249 static struct usb_msc_cbw *msc_alloc_cbw(struct msc_func *msc,
00250 phys_addr_t *phys)
00251 {
00252 return dma_alloc(phys, sizeof(struct usb_msc_cbw));
00253 }
00254
00255 static void msc_free_cbw(struct msc_func *msc, struct usb_msc_cbw *cbw)
00256 {
00257 dma_free(cbw, sizeof(struct usb_msc_cbw));
00258 }
00259
00260 static struct usb_msc_csw *msc_alloc_csw(struct msc_func *msc,
00261 phys_addr_t *phys)
00262 {
00263 struct usb_msc_csw *csw;
00264
00265 csw = dma_alloc(phys, sizeof(struct usb_msc_csw));
00266 if (csw)
00267 csw->dCSWSignature = LE32(USB_CSW_SIGNATURE);
00268
00269 return csw;
00270 }
00271
00272 static void msc_free_csw(struct msc_func *msc, struct usb_msc_csw *csw)
00273 {
00274 dma_free(csw, sizeof(struct usb_msc_csw));
00275 }
00276
00277 static void msc_free_dma_buf_list(struct slist *buf_list)
00278 {
00279 struct buffer *buf;
00280
00281 while (!slist_is_empty(buf_list)) {
00282 buf = slist_pop_head(buf_list, struct buffer, node);
00283 buffer_dma_free(buf, MSC_DATA_BUFFER_SIZE);
00284 }
00285 }
00286
/*
 * Called whenever the command queue drains (CSW sent).  Clears the
 * transfer-in-progress flag and, if the function is in the not-ready
 * state with a pending busy callback, fires that callback exactly once.
 * The callback pointer is cleared and interrupts are restored *before*
 * invoking it, so the callback runs outside the critical section and
 * may re-arm itself safely.
 */
static void msc_queue_empty(struct msc_func *msc)
{
	unsigned long iflags;

	iflags = cpu_irq_save();

	dbg_verbose("msc: queue empty, not_ready=%d busy_cb=%p\n",
			msc->not_ready, msc->busy_cb);

	msc->xfer_in_progress = false;
	if (msc->not_ready && msc->busy_cb) {
		/* Snapshot under irq-off so the callback fires only once. */
		void (*busy_cb)(void *) = msc->busy_cb;
		void *busy_data = msc->busy_cb_data;

		msc->busy_cb = NULL;
		cpu_irq_restore(iflags);
		busy_cb(busy_data);
	} else {
		cpu_irq_restore(iflags);
	}
}
00313
00314 static void msc_init_sense(struct msc_func *msc, uint8_t sense_key,
00315 uint16_t add_sense, uint32_t lba)
00316 {
00317 uint8_t *data = msc->sense_data;
00318
00319 memset(data, 0, 18);
00320 data[0] = SCSI_SENSE_VALID | SCSI_SENSE_CURRENT;
00321 data[2] = sense_key;
00322 data[3] = lba >> 24;
00323 data[4] = lba >> 16;
00324 data[5] = lba >> 8;
00325 data[6] = lba;
00326 data[7] = SCSI_SENSE_ADDL_LEN(18);
00327 data[12] = add_sense >> 8;
00328 data[13] = add_sense;
00329 }
00330
00331 static unsigned int msc_sense_len(struct msc_func *msc)
00332 {
00333 return msc->sense_data[7] + 8;
00334 }
00335
00336 static void msc_cbw_received(struct udc *udc, struct usb_request *req);
00337 static void msc_csw_sent(struct udc *udc, struct usb_request *req);
00338
00339 static void msc_prepare_csw(struct msc_func *msc, uint32_t residue,
00340 uint8_t status)
00341 {
00342 struct usb_msc_csw *csw;
00343 struct usb_msc_cbw *cbw;
00344 struct usb_request *req;
00345 struct buffer *buf;
00346
00347 csw = msc->csw;
00348 cbw = msc->cbw;
00349 csw->dCSWTag = cbw->dCBWTag;
00350 csw->dCSWDataResidue = cpu_to_le32(residue);
00351 csw->bCSWStatus = status;
00352
00353 req = msc->cbw_csw_req;
00354 req->bytes_xfered = 0;
00355 req->req_done = msc_csw_sent;
00356 buf = usb_req_get_first_buffer(req);
00357 buffer_init_tx(buf, csw, sizeof(struct usb_msc_csw));
00358
00359 dbg_verbose("msc: CSW t%08x r%u s%u\n", le32_to_cpu(csw->dCSWTag),
00360 residue, status);
00361 }
00362
/*
 * Finish the data stage of a command.  When the transfer completed with
 * no residue, the CSW was already queued behind the data by
 * msc_request_done(), so there is nothing to do here.  When a residue
 * remains, the announced data pipe is halted (per Bulk-Only Transport
 * short-transfer handling) and the CSW request is submitted now.
 */
static void msc_request_data_done(struct udc *udc, struct msc_func *msc)
{
	if (msc->csw->dCSWDataResidue != LE32(0) && msc->bulk_in_ep) {
		/* Stall whichever direction the host announced. */
		if (msc->cbw->bmCBWFlags & USB_CBW_DIRECTION_IN)
			udc_ep_set_halt(udc, msc->bulk_in_ep);
		else
			udc_ep_set_halt(udc, msc->bulk_out_ep);
		udc_ep_submit_in_req(udc, msc->bulk_in_ep, msc->cbw_csw_req);
	}

	dbg_verbose("msc data done: t%08x r%u s%u %s\n",
			le32_to_cpu(msc->csw->dCSWTag),
			le32_to_cpu(msc->csw->dCSWDataResidue),
			msc->csw->bCSWStatus,
			(msc->cbw->bmCBWFlags & USB_CBW_DIRECTION_IN)
			? "IN" : "OUT");
}
00380
/*
 * Complete a command whose data stage has been queued.  With no residue
 * the CSW request can be queued immediately behind the data on the bulk
 * IN endpoint; with a residue, CSW submission is deferred to
 * msc_request_data_done(), which first stalls the data pipe.
 */
static void msc_request_done(struct udc *udc, struct msc_func *msc,
		uint32_t residue)
{
	if (!residue)
		udc_ep_submit_in_req(udc, msc->bulk_in_ep, msc->cbw_csw_req);

	dbg_verbose("msc req done\n");
}
00395
/*
 * Complete a command without queuing any data stage.  If the host
 * announced data we will not transfer (residue != 0), stall the
 * announced pipe first, then submit the CSW.
 */
static void msc_request_done_nodata(struct udc *udc, struct msc_func *msc,
		uint32_t residue)
{
	if (residue) {
		/* Host expects data we won't provide: stall that pipe. */
		if (msc->cbw->bmCBWFlags & USB_CBW_DIRECTION_IN)
			udc_ep_set_halt(udc, msc->bulk_in_ep);
		else
			udc_ep_set_halt(udc, msc->bulk_out_ep);
	}

	udc_ep_submit_in_req(udc, msc->bulk_in_ep, msc->cbw_csw_req);
}
00408
00409 static void msc_request_failed(struct msc_func *msc, uint32_t residue,
00410 uint8_t csw_status, uint8_t sense_key, uint16_t add_sense)
00411 {
00412 dbg_warning("msc: req %02x failed: SK %02x ASC(Q) %04x\n",
00413 scsi_cdb_get_opcode(msc->cbw->CDB),
00414 sense_key, add_sense);
00415
00416 msc_init_sense(msc, sense_key, add_sense, 0);
00417 msc_prepare_csw(msc, residue, csw_status);
00418 msc_request_done_nodata(msc->udc, msc, residue);
00419 }
00420
00421 static void msc_phase_error(struct msc_func *msc)
00422 {
00423 struct usb_msc_cbw *cbw = msc->cbw;
00424 uint32_t cbw_xfer_len;
00425
00426 cbw_xfer_len = le32_to_cpu(cbw->dCBWDataTransferLength);
00427
00428 dbg_error("msc: Phase Error (opcode %02x)\n",
00429 scsi_cdb_get_opcode(cbw->CDB));
00430 dbg_verbose("msc: CBW bmCBWFlags = 0x%02x\n", cbw->bmCBWFlags);
00431 dbg_verbose("msc: CBW dCBWDataTransferLength = 0x%x\n", cbw_xfer_len);
00432
00433 msc_prepare_csw(msc, cbw_xfer_len, USB_CSW_STATUS_PE);
00434 msc_request_done_nodata(msc->udc, msc, cbw_xfer_len);
00435 }
00436
/* Allocation failure mid-command: give up with a Phase Error CSW. */
static void msc_out_of_memory(struct msc_func *msc)
{
	dbg_error("msc: Out of memory!\n");

	msc_phase_error(msc);
}
00442
/*
 * Completion handler for CSW transmission — the command is now fully
 * finished.  On success the shared request is recycled to receive the
 * next CBW; on error we stop re-arming (presumably the host recovers
 * us via Bulk-Only reset — NOTE(review): confirm against the UDC
 * driver's error semantics).
 */
static void msc_csw_sent(struct udc *udc, struct usb_request *req)
{
	struct msc_func *msc = req->context;
	struct buffer *buf;

	buf = usb_req_get_first_buffer(req);
	assert(buf->addr.ptr == msc->csw);

	dbg_verbose("msc: CSW sent, status=%d\n", req->status);

	/* Queue is drained: fire any pending busy callback. */
	msc_queue_empty(msc);

	/* Don't re-arm after a failed CSW transfer. */
	if (req->status)
		return;

	/* Recycle the same request/buffer to receive the next CBW. */
	buffer_init_rx(buf, msc->cbw, sizeof(*msc->cbw));
	req->bytes_xfered = 0;
	req->req_done = msc_cbw_received;

	udc_ep_submit_out_req(udc, msc->bulk_out_ep, req);
}
00466
00467 static void msc_data_sent(struct udc *udc, struct usb_request *req)
00468 {
00469 struct msc_func *msc = req->context;
00470 int status = req->status;
00471
00472 usb_req_free(req);
00473
00474 if (!status)
00475 msc_request_data_done(udc, msc);
00476 }
00477
/* Completion for data sent from a generic (non-DMA-pool) buffer. */
static void msc_buf_sent(struct udc *udc, struct usb_request *req)
{
	struct buffer *buf = usb_req_get_first_buffer(req);

	buffer_free(buf);
	msc_data_sent(udc, req);
}
00486
00487 static void msc_capacity_sent(struct udc *udc, struct usb_request *req)
00488 {
00489 struct buffer *buf;
00490
00491 buf = usb_req_get_first_buffer(req);
00492 buffer_dma_free(buf, sizeof(struct sbc_read_capacity10_data));
00493 msc_data_sent(udc, req);
00494 }
00495
00496 static void msc_vpd_serial_sent(struct udc *udc, struct usb_request *req)
00497 {
00498 struct buffer *buf;
00499
00500 buf = usb_req_get_first_buffer(req);
00501 buffer_dma_free(buf, MSC_VPD_SERIAL_BUF_SIZE);
00502 msc_data_sent(udc, req);
00503 }
00504
00505 static void msc_vpd_id_sent(struct udc *udc, struct usb_request *req)
00506 {
00507 struct buffer *buf;
00508
00509 buf = usb_req_get_first_buffer(req);
00510 buffer_dma_free(buf, MSC_VPD_ID_BUF_SIZE);
00511 msc_data_sent(udc, req);
00512 }
00513
/*
 * Completion for REQUEST SENSE data.  Sense data is single-shot: once
 * it has been reported it is reset here to NO SENSE before the command
 * is finished.
 */
static void msc_sense_data_sent(struct udc *udc, struct usb_request *req)
{
	struct msc_func *msc = req->context;
	struct buffer *buf;
	int status = req->status;

	dbg_verbose("msc sense data sent: %zu bytes, status %d\n",
			req->bytes_xfered, req->status);

	/* Clear the reported sense condition. */
	msc_init_sense(msc, SCSI_SK_NO_SENSE,
			SCSI_ASC_NO_ADDITIONAL_SENSE_INFO, 0);
	buf = usb_req_get_first_buffer(req);
	buffer_free(buf);
	usb_req_free(req);

	if (!status)
		msc_request_data_done(udc, msc);
}
00532
00533 static void msc_mode_params_sent(struct udc *udc, struct usb_request *req)
00534 {
00535 struct buffer *buf;
00536
00537 buf = usb_req_get_first_buffer(req);
00538 buffer_dma_free(buf, MSC_MODE_PARAM_HDR_BUF_LEN);
00539 msc_data_sent(udc, req);
00540 }
00541
/*
 * Sanity-check the CBW against the command we are about to execute.
 *
 * @alloc_len: number of bytes the command will actually transfer.
 * @dir_flag:  USB_CBW_DIRECTION_IN for device-to-host commands,
 *             0 for host-to-device.
 *
 * Returns the residue (announced transfer length minus @alloc_len) on
 * success.  If the host's announced direction disagrees with the
 * command, or the announced length is too short to hold our response,
 * a Phase Error is reported and a negative value is returned; the
 * caller must stop processing the command.
 */
static long msc_validate_req(struct msc_func *msc, struct usb_msc_cbw *cbw,
		uint32_t alloc_len, uint8_t dir_flag)
{
	uint32_t cbw_len;

	cbw_len = le32_to_cpu(cbw->dCBWDataTransferLength);

	/*
	 * XOR of the announced flags with the expected direction isolates
	 * a direction mismatch in the DIRECTION_IN bit.
	 */
	if ((cbw->bmCBWFlags ^ dir_flag) & USB_CBW_DIRECTION_IN
			|| cbw_len < alloc_len) {
		msc_phase_error(msc);
		return -1;
	}

	/* cbw_len >= alloc_len here, so the residue is non-negative. */
	return cbw_len - alloc_len;
}
00574
/*
 * TEST UNIT READY: no data stage; the CSW status reflects readiness.
 * An owner-imposed not-ready condition (with its busy_asc code) takes
 * priority over medium presence.
 */
static void msc_test_unit_ready(struct msc_func *msc, struct udc *udc,
		uint32_t cbw_data_len)
{
	dbg_verbose("msc TEST UNIT READY len %u\n", cbw_data_len);

	if (msc->not_ready) {
		msc_request_failed(msc, cbw_data_len, USB_CSW_STATUS_FAIL,
				SCSI_SK_NOT_READY,
				msc->busy_asc);
	} else if (test_bit(BDEV_PRESENT, &msc->bdev->flags)) {
		msc_prepare_csw(msc, cbw_data_len, USB_CSW_STATUS_PASS);
		msc_request_done_nodata(udc, msc, cbw_data_len);
	} else {
		msc_request_failed(msc, cbw_data_len, USB_CSW_STATUS_FAIL,
				SCSI_SK_NOT_READY,
				SCSI_ASC_MEDIUM_NOT_PRESENT);
	}
}
00593
/*
 * REQUEST SENSE: return the stored sense data, truncated to the host's
 * allocation length.  A zero allocation length still succeeds and the
 * sense data is cleared without a data stage.
 */
static void msc_request_sense(struct msc_func *msc, struct udc *udc,
		struct usb_msc_cbw *cbw)
{
	struct usb_request *req;
	struct buffer *buf;
	const uint8_t *cdb = cbw->CDB;
	long residue;
	uint8_t alloc_len;

	alloc_len = scsi_cdb_get_alloc_len(cdb, SCSI_CMD_REQUEST_SENSE);

	dbg_verbose("msc: REQUEST SENSE len %u (sense len %u)\n",
			alloc_len, msc_sense_len(msc));

	residue = msc_validate_req(msc, cbw, alloc_len, USB_CBW_DIRECTION_IN);
	if (residue < 0)
		return;

	if (alloc_len > 0) {
		/*
		 * Send min(alloc_len, actual sense length) bytes straight
		 * from the persistent sense buffer.  The completion handler
		 * (msc_sense_data_sent) clears the sense data afterwards.
		 */
		req = usb_req_alloc();
		if (!req)
			goto err_req_alloc;
		buf = buffer_alloc();
		if (!buf)
			goto err_buf_alloc;

		buffer_init_tx(buf, msc->sense_data,
				min(alloc_len, msc_sense_len(msc)));
		usb_req_add_buffer(req, buf);
		req->context = msc;
		req->req_done = msc_sense_data_sent;

		/* Account for sense data shorter than alloc_len. */
		residue += alloc_len - buf->len;
		msc_prepare_csw(msc, residue, USB_CSW_STATUS_PASS);
		udc_ep_submit_in_req(udc, msc->bulk_in_ep, req);

		msc_request_done(udc, msc, residue);
	} else {
		/*
		 * Zero allocation length: no data stage, but the command
		 * succeeds and the sense data is reset to NO SENSE.
		 */
		msc_init_sense(msc, SCSI_SK_NO_SENSE,
				SCSI_ASC_NO_ADDITIONAL_SENSE_INFO, 0);
		msc_prepare_csw(msc, residue, USB_CSW_STATUS_PASS);
		msc_request_done_nodata(udc, msc, residue);
	}

	return;

err_buf_alloc:
	usb_req_free(req);
err_req_alloc:
	msc_out_of_memory(msc);
}
00655
00656 static int msc_vpd_supported_pages(struct msc_func *msc,
00657 struct usb_request *req, size_t alloc_len)
00658 {
00659 static const uint8_t vpd_page[] = {
00660 SCSI_INQ_PQ_CONNECTED | SCSI_INQ_DT_DIR_ACCESS,
00661 SCSI_VPD_SUPPORTED_PAGES,
00662 0x00,
00663 3,
00664 SCSI_VPD_SUPPORTED_PAGES,
00665 SCSI_VPD_UNIT_SERIAL_NUMBER,
00666 SCSI_VPD_DEVICE_IDENTIFICATION,
00667 };
00668 struct buffer *buf;
00669 size_t total_len = 0;
00670
00671 if (alloc_len) {
00672 buf = buffer_alloc();
00673 if (!buf)
00674 return -1;
00675
00676 total_len = min(alloc_len, sizeof(vpd_page));
00677 buffer_init_tx(buf, &vpd_page, total_len);
00678 usb_req_add_buffer(req, buf);
00679 req->req_done = msc_buf_sent;
00680 }
00681
00682 return total_len;
00683 }
00684
/*
 * INQUIRY VPD page 0x80 (Unit Serial Number).  The serial number is
 * right-justified in a fixed MSC_MAX_SERIAL_LEN field, left-padded with
 * spaces.  Returns the number of bytes queued (0 for a zero allocation
 * length), or -1 when the DMA buffer could not be allocated.
 */
static int msc_vpd_serial_number(struct msc_func *msc,
		struct usb_request *req, size_t alloc_len)
{
	struct buffer *buf;
	uint8_t *vpd_page;
	const char *serial;
	size_t serial_len;
	size_t total_len = 0;

	if (alloc_len) {
		serial = get_serial_number();
		serial_len = strlen(serial);

		/*
		 * The page advertises a fixed-size serial field; the real
		 * serial string must fit inside it.
		 */
		assert(serial_len <= MSC_MAX_SERIAL_LEN);

		buf = buffer_dma_alloc(MSC_VPD_SERIAL_BUF_SIZE);
		if (!buf)
			return -1;

		vpd_page = buf->addr.ptr;
		vpd_page[0] = SCSI_INQ_PQ_CONNECTED | SCSI_INQ_DT_DIR_ACCESS;
		vpd_page[1] = SCSI_VPD_UNIT_SERIAL_NUMBER;
		vpd_page[2] = 0;
		vpd_page[3] = MSC_MAX_SERIAL_LEN;	/* page length */
		/* Left-pad with spaces, then place the serial at the end. */
		memset(vpd_page + SCSI_VPD_HEADER_SIZE,
				' ', MSC_MAX_SERIAL_LEN - serial_len);
		memcpy(vpd_page + MSC_VPD_SERIAL_BUF_SIZE - serial_len,
				serial, serial_len);

		total_len = MSC_VPD_SERIAL_BUF_SIZE;
		if (alloc_len < total_len) {
			total_len = alloc_len;
			buffer_resize(buf, total_len);
		}

		usb_req_add_buffer(req, buf);
		req->req_done = msc_vpd_serial_sent;
	}

	return total_len;
}
00732
/*
 * INQUIRY VPD page 0x83 (Device Identification).  A single T10
 * vendor-ID based ASCII designator is returned: the 8-byte vendor ID
 * and 16-byte product ID copied from the standard INQUIRY data,
 * followed by the serial number right-justified in a fixed-size field.
 * Returns the number of bytes queued (0 for a zero allocation length),
 * or -1 when the DMA buffer could not be allocated.
 */
static int msc_vpd_device_id(struct msc_func *msc,
		struct usb_request *req, size_t alloc_len)
{
	struct buffer *buf;
	const char *serial;
	size_t serial_len;
	uint8_t *vpd_page;
	uint8_t *p;
	size_t total_len = 0;

	if (alloc_len) {
		serial = get_serial_number();
		serial_len = strlen(serial);

		/*
		 * The designator has a fixed-size serial field; the real
		 * serial string must fit inside it.
		 */
		assert(serial_len <= MSC_MAX_SERIAL_LEN);

		buf = buffer_dma_alloc(MSC_VPD_ID_BUF_SIZE);
		if (!buf)
			return -1;

		vpd_page = buf->addr.ptr;
		vpd_page[0] = SCSI_INQ_PQ_CONNECTED | SCSI_INQ_DT_DIR_ACCESS;
		vpd_page[1] = SCSI_VPD_DEVICE_IDENTIFICATION;
		vpd_page[2] = 0;
		vpd_page[3] = MSC_VPD_ID_BUF_SIZE - SCSI_VPD_HEADER_SIZE;
		vpd_page[4] = SCSI_VPD_CODE_SET_ASCII;
		vpd_page[5] = SCSI_VPD_ID_TYPE_T10;
		vpd_page[6] = 0;
		vpd_page[7] = MSC_VPD_ID_BUF_SIZE - SCSI_VPD_HEADER_SIZE
			- SCSI_VPD_ID_HEADER_SIZE;

		/* T10 vendor ID (8) + product ID (16) from INQUIRY data. */
		p = vpd_page + SCSI_VPD_HEADER_SIZE + SCSI_VPD_ID_HEADER_SIZE;
		memcpy(p, msc_inquiry_data.vendor_id, 8 + 16);

		/* Serial number, right-justified with space padding. */
		p += 8 + 16;
		memset(p, ' ', MSC_MAX_SERIAL_LEN - serial_len);
		p += MSC_MAX_SERIAL_LEN - serial_len;
		memcpy(p, serial, serial_len);

		total_len = MSC_VPD_ID_BUF_SIZE;
		if (alloc_len < total_len) {
			total_len = alloc_len;
			buffer_resize(buf, total_len);
		}

		usb_req_add_buffer(req, buf);
		req->req_done = msc_vpd_id_sent;
	}

	return total_len;
}
00792
/*
 * INQUIRY: return either the standard INQUIRY data or, when the EVPD
 * bit is set, one of the supported VPD pages.  A page code with EVPD
 * clear, or an unsupported VPD page, fails with ILLEGAL REQUEST /
 * INVALID FIELD IN CDB.
 */
static void msc_inquiry(struct msc_func *msc, struct udc *udc,
		struct usb_msc_cbw *cbw)
{
	struct usb_request *req;
	struct buffer *buf;
	long residue;
	long ret;
	uint16_t alloc_len;
	const uint8_t *cdb = cbw->CDB;
	size_t total_len;
	uint8_t page_code;

	alloc_len = scsi_cdb_get_u16(cdb, 3);

	dbg_verbose("msc: INQUIRY %u (inq len %zu)\n",
			alloc_len, sizeof(msc_inquiry_data));

	residue = msc_validate_req(msc, cbw, alloc_len, USB_CBW_DIRECTION_IN);
	if (residue < 0)
		return;

	req = usb_req_alloc();
	if (!req)
		goto err_req_alloc;
	req->context = msc;

	total_len = 0;
	page_code = cdb[2];

	if (cdb[1] & SCSI_INQ_REQ_EVPD) {
		/* Vital Product Data page requested. */
		switch (page_code) {
		case SCSI_VPD_SUPPORTED_PAGES:
			ret = msc_vpd_supported_pages(msc, req, alloc_len);
			break;
		case SCSI_VPD_UNIT_SERIAL_NUMBER:
			ret = msc_vpd_serial_number(msc, req, alloc_len);
			break;
		case SCSI_VPD_DEVICE_IDENTIFICATION:
			ret = msc_vpd_device_id(msc, req, alloc_len);
			break;
		default:
			dbg_info("msc: unsupported VPD page %02x requested\n",
					page_code);
			goto illegal_request;
		}

		/* Negative return means buffer allocation failed. */
		if (ret < 0)
			goto err_buf_alloc;

		total_len = ret;
	} else if (page_code != 0) {
		/* Page code is only valid together with EVPD. */
		dbg_info("msc: INQUIRY PC=%02x but EVPD not set\n", page_code);
		goto illegal_request;
	} else if (alloc_len) {
		/* Standard INQUIRY data, truncated to alloc_len. */
		buf = buffer_alloc();
		if (!buf)
			goto err_buf_alloc;

		total_len = min(alloc_len, sizeof(msc_inquiry_data));
		buffer_init_tx(buf, &msc_inquiry_data, total_len);
		usb_req_add_buffer(req, buf);
		req->req_done = msc_buf_sent;
	}

	residue += alloc_len - total_len;
	msc_prepare_csw(msc, residue, USB_CSW_STATUS_PASS);

	if (total_len) {
		udc_ep_submit_in_req(udc, msc->bulk_in_ep, req);
		msc_request_done(udc, msc, residue);
	} else {
		usb_req_free(req);
		msc_request_done_nodata(udc, msc, residue);
	}

	return;

illegal_request:
	/* alloc_len + residue restores the full announced length. */
	msc_request_failed(msc, alloc_len + residue,
			USB_CSW_STATUS_FAIL,
			SCSI_SK_ILLEGAL_REQUEST,
			SCSI_ASC_INVALID_FIELD_IN_CDB);
	usb_req_free(req);
	return;

err_buf_alloc:
	usb_req_free(req);
err_req_alloc:
	msc_out_of_memory(msc);
}
00886
/*
 * Append mode pages for MODE SENSE.  No actual mode pages are
 * implemented: page 0 and PAGE_ALL are accepted but contribute no
 * data; anything else fails the command with ILLEGAL REQUEST.
 * Changeable-values requests are rejected outright.
 *
 * @avail_len is currently unused — NOTE(review): presumably reserved
 * for when real mode pages are added; confirm before removing.
 *
 * Returns the number of bytes added (always 0 today), or -1 after
 * failing the command.
 */
static long msc_add_mode_pages(struct msc_func *msc,
		struct usb_request *req, uint32_t avail_len,
		const uint8_t *cdb, uint32_t cbw_data_len)
{
	/* We have no changeable parameters to report. */
	if (scsi_mode_sense_get_pc(cdb) == SCSI_MS_PC_CHANGEABLE) {
		dbg_info("msc: changeable mode pages not supported\n");
		goto invalid_request;
	}

	/*
	 * Accept page 0 and "all pages"; both yield an empty page list
	 * since no mode pages are implemented.
	 */
	switch (scsi_mode_sense_get_page_code(cdb)) {
	case 0:
		/* no pages */
		break;
	case SCSI_MS_PAGE_ALL:
		/* all (zero) pages */
		break;
	default:
		dbg_info("msc: unsupported mode page 0x%x\n",
				scsi_mode_sense_get_page_code(cdb));
		goto invalid_request;
	}

	return 0;

invalid_request:
	msc_request_failed(msc, cbw_data_len, USB_CSW_STATUS_FAIL,
			SCSI_SK_ILLEGAL_REQUEST,
			SCSI_ASC_INVALID_FIELD_IN_CDB);
	return -1;
}
00923
/*
 * MODE SENSE(6)/(10): build a mode parameter header (6- or 10-byte
 * variant selected by the opcode), optionally followed by one
 * short-LBA block descriptor, then any mode pages (none implemented).
 * The write-protect bit mirrors the block device's writeability.
 */
static void msc_mode_sense(struct msc_func *msc, struct udc *udc,
		struct usb_msc_cbw *cbw, uint32_t alloc_len)
{
	union msc_mode_param_header *header;
	struct usb_request *req;
	struct buffer *buf;
	const uint8_t *cdb = cbw->CDB;
	long residue;
	long ret;
	size_t total_size;

	dbg_verbose("msc MODE SENSE(N) page %u PC%u len %u\n",
			scsi_mode_sense_get_page_code(cdb),
			scsi_mode_sense_get_pc(cdb),
			alloc_len);

	residue = msc_validate_req(msc, cbw, alloc_len, USB_CBW_DIRECTION_IN);
	if (residue < 0)
		return;

	req = usb_req_alloc();
	if (!req)
		goto err_req_alloc;
	req->context = msc;
	req->req_done = msc_mode_params_sent;

	/*
	 * One buffer sized for the largest header plus a single block
	 * descriptor; it is shrunk to the actual size below.
	 */
	buf = buffer_dma_alloc(MSC_MODE_PARAM_HDR_BUF_LEN);
	if (!buf)
		goto err_buf_alloc;
	usb_req_add_buffer(req, buf);

	header = buf->addr.ptr;
	memset(header, 0, MSC_MODE_PARAM_HDR_BUF_LEN);

	/* Report write protection via the device-specific parameter. */
	if (scsi_cdb_get_opcode(cdb) == SCSI_CMD_MODE_SENSE6) {
		if (!test_bit(BDEV_WRITEABLE, &msc->bdev->flags))
			header->h6.device_specific_parameter = SCSI_MS_SBC_WP;
		total_size = sizeof(header->h6);
	} else {
		if (!test_bit(BDEV_WRITEABLE, &msc->bdev->flags))
			header->h10.device_specific_parameter = SCSI_MS_SBC_WP;
		total_size = sizeof(header->h10);
	}

	/*
	 * Unless the host disabled block descriptors (DBD), append one
	 * short-LBA descriptor with capacity and block length.
	 */
	if (!scsi_mode_sense_dbd_is_set(cdb)) {
		struct sbc_slba_block_desc *desc;

		total_size += sizeof(*desc);
		if (scsi_cdb_get_opcode(cdb) == SCSI_CMD_MODE_SENSE6) {
			header->h6.block_descriptor_length = sizeof(*desc);
			desc = (struct sbc_slba_block_desc *)(&header->h6 + 1);
		} else {
			header->h10.block_descriptor_length
				= cpu_to_be16(sizeof(*desc));
			desc = (struct sbc_slba_block_desc *)(&header->h10 + 1);
		}
		desc->nr_blocks = cpu_to_be32(msc->bdev->nr_blocks);
		desc->block_len = cpu_to_be32(msc->bdev->block_size);
		assert(!(be32_to_cpu(desc->block_len)
				& ~SBC_SLBA_BLOCK_LEN_MASK));
	}

	buffer_resize(buf, total_size);

	/* Append mode pages (none implemented; may fail the command). */
	ret = msc_add_mode_pages(msc, req, alloc_len - total_size, cdb,
			alloc_len + residue);
	if (ret < 0) {
		usb_req_free(req);
		buffer_dma_free(buf, MSC_MODE_PARAM_HDR_BUF_LEN);
		return;
	}

	total_size += ret;
	/* MODE DATA LENGTH excludes itself (1 byte in h6, 2 in h10). */
	if (scsi_cdb_get_opcode(cdb) == SCSI_CMD_MODE_SENSE6)
		header->h6.mode_data_length = total_size - 1;
	else
		header->h10.mode_data_length = cpu_to_be16(total_size - 2);

	/*
	 * Truncate to the allocation length; any shortfall is added to
	 * the residue reported in the CSW.
	 */
	residue += alloc_len - min(alloc_len, total_size);
	msc_prepare_csw(msc, residue, USB_CSW_STATUS_PASS);
	if (alloc_len > 0) {
		udc_ep_submit_in_req(udc, msc->bulk_in_ep, req);
		msc_request_done(udc, msc, residue);
	} else {
		usb_req_free(req);
		buffer_dma_free(buf, MSC_MODE_PARAM_HDR_BUF_LEN);
		msc_request_done_nodata(udc, msc, residue);
	}

	return;

err_buf_alloc:
	usb_req_free(req);
err_req_alloc:
	msc_out_of_memory(msc);
}
01034
01035 static void msc_read_capacity(struct msc_func *msc, struct udc *udc,
01036 struct usb_msc_cbw *cbw)
01037 {
01038 struct sbc_read_capacity10_data *response;
01039 struct usb_request *req;
01040 struct buffer *buf;
01041 uint32_t residue;
01042
01043 build_assert(sizeof(*response) == 8);
01044
01045 dbg_verbose("msc READ CAPACITY LBA %x blklen %u\n",
01046 msc->bdev->nr_blocks - 1, msc->bdev->block_size);
01047
01048 residue = msc_validate_req(msc, cbw, 8, USB_CBW_DIRECTION_IN);
01049 if (residue < 0)
01050 return;
01051
01052 req = usb_req_alloc();
01053 if (!req)
01054 goto err_req_alloc;
01055 req->req_done = msc_capacity_sent;
01056 req->context = msc;
01057
01058 buf = buffer_dma_alloc(sizeof(*response));
01059 if (!buf)
01060 goto err_buf_alloc;
01061 usb_req_add_buffer(req, buf);
01062
01063 response = buf->addr.ptr;
01064 response->max_lba = cpu_to_be32(msc->bdev->nr_blocks - 1);
01065 response->block_len = cpu_to_be32(msc->bdev->block_size);
01066
01067 msc_prepare_csw(msc, residue, USB_CSW_STATUS_PASS);
01068 udc_ep_submit_in_req(udc, msc->bulk_in_ep, req);
01069 msc_request_done(udc, msc, residue);
01070
01071 return;
01072
01073 err_buf_alloc:
01074 usb_req_free(req);
01075 err_req_alloc:
01076 msc_out_of_memory(msc);
01077 }
01078
/*
 * Allocate up to MSC_MAX_NR_BUFFERS DMA buffers covering @nr_blocks
 * blocks of @block_size bytes and append them to @buf_list.  The last
 * buffer is shrunk to cover only the remaining blocks.  Returns how
 * many blocks the allocated buffers cover — may be less than
 * @nr_blocks when the DMA pool runs dry.
 *
 * NOTE(review): assumes block_size <= MSC_DATA_BUFFER_SIZE so that
 * blocks_per_buf is non-zero — confirm against supported devices.
 */
static uint32_t msc_fill_buffer_list(struct slist *buf_list,
		unsigned int block_size, uint32_t nr_blocks)
{
	uint32_t blocks_remaining;
	uint32_t blocks_per_buf;
	unsigned int i;

	blocks_remaining = nr_blocks;
	blocks_per_buf = MSC_DATA_BUFFER_SIZE / block_size;

	for (i = 0; i < MSC_MAX_NR_BUFFERS; i++) {
		struct buffer *buf;

		buf = buffer_dma_alloc(MSC_DATA_BUFFER_SIZE);
		if (!buf)
			break;

		/* Final buffer: shrink to the remaining block count. */
		if (blocks_per_buf > blocks_remaining) {
			blocks_per_buf = blocks_remaining;
			buffer_resize(buf, blocks_per_buf * block_size);
		}

		slist_insert_tail(buf_list, &buf->node);
		blocks_remaining -= blocks_per_buf;
		if (!blocks_remaining)
			break;
	}

	return nr_blocks - blocks_remaining;
}
01109
/*
 * Allocate buffers for up to @nr_blocks blocks and submit them to the
 * block layer as one read segment.  Returns the number of blocks
 * actually queued (0 on allocation failure or submission error).
 */
static int msc_submit_read_buffers(struct msc_func *msc,
		struct block_device *bdev, struct block_request *breq,
		uint32_t nr_blocks)
{
	struct slist buf_list;
	uint32_t blocks_queued;

	slist_init(&buf_list);
	blocks_queued = msc_fill_buffer_list(&buf_list, bdev->block_size,
			nr_blocks);

	dbg_verbose("msc: blocks %u/%u queued for read\n", blocks_queued,
			nr_blocks);

	if (unlikely(!blocks_queued))
		return 0;

	/*
	 * Count the segment as pending *before* submitting so the
	 * completion callback cannot observe an inconsistent count if it
	 * runs before block_submit_buf_list() returns.  Undo on failure.
	 */
	atomic_inc(&msc->blk_segs_pending);
	if (block_submit_buf_list(bdev, breq, &buf_list)) {
		atomic_dec(&msc->blk_segs_pending);
		msc_free_dma_buf_list(&buf_list);
		return 0;
	}

	msc->blocks_queued += blocks_queued;

	return blocks_queued;
}
01154
/*
 * Keep the block layer fed during a READ transfer: submit read
 * segments until MSC_MAX_NR_SEGS are in flight or all blocks are
 * queued.  State is examined with interrupts off; queue_locked guards
 * against re-entry from completion callbacks while interrupts are
 * re-enabled for the (potentially slow) submission itself.
 */
static void msc_read_worker(struct msc_func *msc)
{
	struct block_device *bdev = msc->bdev;
	struct block_request *breq = msc->block_req;
	uint32_t blocks_remaining;
	uint32_t submitted;

	cpu_irq_disable();
	dbg_verbose("msc: blk pending %u locked %d\n",
			atomic_read(&msc->blk_segs_pending), msc->queue_locked);
	while (atomic_read(&msc->blk_segs_pending) < MSC_MAX_NR_SEGS
			&& !msc->queue_locked) {
		dbg_verbose("msc: read worker: q%u <= t%u s %d\n",
				msc->blocks_queued, msc->blocks_total,
				breq->status);
		assert(msc->blocks_queued <= msc->blocks_total);
		blocks_remaining = msc->blocks_total - msc->blocks_queued;
		if (!blocks_remaining)
			break;

		/* Drop irq-off across the submission; lock out re-entry. */
		msc->queue_locked = true;
		cpu_irq_enable();

		submitted = msc_submit_read_buffers(msc, bdev, breq,
				blocks_remaining);

		cpu_irq_disable();
		msc->queue_locked = false;

		/* Out of buffers or submission failed: try again later. */
		if (!submitted)
			break;
	}
	cpu_irq_enable();

	dbg_verbose("msc read worker done\n");
}
01202
/*
 * Completion for one USB IN request carrying read data.  Frees the
 * request's buffers, then either aborts the block request (USB error),
 * finishes the data stage (all blocks queued), or keeps the read
 * pipeline moving.
 */
static void msc_read_data_sent(struct udc *udc, struct usb_request *req)
{
	struct msc_func *msc = req->context;
	uint32_t bytes_xfered;
	uint32_t blocks_remaining;
	int status;

	dbg_verbose("msc: data sent: first=%p last=%p\n",
			slist_peek_head_node(&req->buf_list),
			slist_peek_tail_node(&req->buf_list));

	msc_free_dma_buf_list(&req->buf_list);
	status = req->status;
	bytes_xfered = req->bytes_xfered;
	usb_req_free(req);

	assert(atomic_read(&msc->usb_reqs_pending) > 0);
	atomic_dec(&msc->usb_reqs_pending);

	/*
	 * A USB-side failure ends the transfer: abort the outstanding
	 * block request instead of queuing more data.
	 */
	if (status) {
		block_abort_req(msc->bdev, msc->block_req);
		return;
	}

	blocks_remaining = msc->blocks_total - msc->blocks_queued;
	if (!blocks_remaining)
		msc_request_data_done(udc, msc);
	else
		msc_read_worker(msc);
}
01237
01238
01239
01240
01241
01242
01243
01244
01245 static void msc_block_read_started(struct block_device *bdev,
01246 struct block_request *breq)
01247 {
01248 struct msc_func *msc = breq->context;
01249
01250 if (msc->blocks_queued < msc->blocks_total)
01251 msc_read_worker(msc);
01252 }
01253
/*
 * Block-layer completion for the whole read request.  On media error
 * the CSW residue is recomputed from the blocks actually transferred,
 * the status is set to FAIL, and MEDIUM ERROR sense data pointing at
 * the failing LBA is recorded.
 */
static void msc_block_read_done(struct block_device *bdev,
		struct block_request *breq)
{
	struct msc_func *msc = breq->context;
	uint32_t residue;

	assert(breq == msc->block_req);

	residue = le32_to_cpu(msc->csw->dCSWDataResidue);

	if (breq->status) {
		struct usb_msc_csw *csw = msc->csw;
		uint32_t blocks_xfered;

		blocks_xfered = blk_req_get_blocks_xfered(bdev, breq);

		dbg_warning("msc: block read failed: %d (after %u blocks)\n",
				breq->status, blocks_xfered);

		/* Residue = announced length minus bytes actually read. */
		residue = le32_to_cpu(msc->cbw->dCBWDataTransferLength);
		residue -= bdev->block_size * blocks_xfered;
		csw->dCSWDataResidue = cpu_to_le32(residue);
		csw->bCSWStatus = USB_CSW_STATUS_FAIL;

		/* Point the sense INFORMATION field at the failing LBA. */
		msc_init_sense(msc, SCSI_SK_MEDIUM_ERROR,
				SCSI_ASC_UNRECOVERED_READ_ERROR,
				msc->first_lba + blocks_xfered);
	}

	msc_request_done(msc->udc, msc, residue);
}
01285
/*
 * Block-layer callback: a list of DMA buffers has been filled with
 * read data. Wrap them in a USB request and submit it on the bulk-IN
 * endpoint.
 *
 * If the block request already terminated, or the interface was torn
 * down (bulk_in_ep cleared), the buffers are simply discarded. On USB
 * request allocation failure the whole block request is aborted and
 * the out-of-memory path takes over.
 */
static void msc_block_read_buffers_done(struct block_device *bdev,
		struct block_request *breq, struct slist *buf_list)
{
	struct msc_func *msc = breq->context;
	struct usb_request *req;

	assert(!slist_is_empty(buf_list));

	dbg_verbose("msc: read bufs done: status %d\n", breq->status);

	if (breq->status != -STATUS_IN_PROGRESS || !msc->bulk_in_ep) {
		dbg_verbose(" request terminated, discarding buffers\n");
		msc_free_dma_buf_list(buf_list);
		return;
	}

	req = usb_req_alloc();
	if (!req) {
		block_abort_req(bdev, breq);
		msc_free_dma_buf_list(buf_list);
		msc_out_of_memory(msc);
		return;
	}

	/* One block-layer segment becomes one in-flight USB request */
	assert(atomic_read(&msc->blk_segs_pending) > 0);
	atomic_inc(&msc->usb_reqs_pending);
	atomic_dec(&msc->blk_segs_pending);

	slist_move_to_tail(&req->buf_list, buf_list);
	req->req_done = msc_read_data_sent;
	req->context = msc;
	dbg_verbose(" submitting IN request...\n");
	udc_ep_submit_in_req(msc->udc, msc->bulk_in_ep, req);
}
01320
/*
 * Handle a SCSI READ(6)/READ(10) command.
 *
 * Validates the CBW against the data length implied by the CDB,
 * prepares a passing CSW, queues a block-layer read and primes the
 * first batch of DMA buffers. Data then flows to the host through
 * msc_block_read_buffers_done()/msc_read_data_sent().
 */
static void msc_do_read(struct msc_func *msc, struct udc *udc,
		struct usb_msc_cbw *cbw, uint32_t lba, uint32_t nr_blocks)
{
	struct block_device *bdev = msc->bdev;
	struct block_request *breq;
	long residue;
	uint32_t cdb_data_len;
	uint32_t blocks_queued;
	unsigned long iflags;

	dbg_verbose("msc READ(x) %u blocks, LBA %u\n", nr_blocks, lba);

	assert(!msc->xfer_in_progress);

	/*
	 * Number of bytes the CDB asks for; compared against the CBW
	 * transfer length by msc_validate_req() below.
	 */
	cdb_data_len = nr_blocks * bdev->block_size;

	residue = msc_validate_req(msc, cbw, cdb_data_len,
			USB_CBW_DIRECTION_IN);
	if (unlikely(residue < 0))
		return;

	/* not_ready and xfer_in_progress are checked/set atomically */
	iflags = cpu_irq_save();
	if (msc->not_ready) {
		cpu_irq_restore(iflags);
		msc_request_failed(msc,
				le32_to_cpu(cbw->dCBWDataTransferLength),
				USB_CSW_STATUS_FAIL,
				SCSI_SK_NOT_READY, msc->busy_asc);
		return;
	}

	msc->xfer_in_progress = true;
	cpu_irq_restore(iflags);

	msc_prepare_csw(msc, residue, USB_CSW_STATUS_PASS);

	/* A zero-block read completes immediately with no data stage */
	if (unlikely(nr_blocks == 0)) {
		msc_request_done_nodata(udc, msc, residue);
		return;
	}

	msc->blocks_total = nr_blocks;
	msc->blocks_queued = 0;
	/* Keep the worker out until the initial buffers are queued */
	msc->queue_locked = true;
	atomic_write(&msc->blk_segs_pending, 0);
	atomic_write(&msc->usb_reqs_pending, 0);

	breq = msc->block_req;
	breq->req_started = msc_block_read_started;
	breq->req_done = msc_block_read_done;
	breq->buf_list_done = msc_block_read_buffers_done;
	breq->context = msc;
	block_queue_req(bdev, breq, lba, nr_blocks, BLK_OP_READ);

	blocks_queued = msc_submit_read_buffers(msc, bdev, breq, nr_blocks);
	if (blocks_queued == 0) {
		block_abort_req(bdev, breq);
		msc_out_of_memory(msc);
	}
	msc->queue_locked = false;
}
01395
01396 static void msc_write_data_received(struct udc *udc, struct usb_request *req);
01397
/*
 * Allocate one bulk-OUT USB request covering up to @nr_blocks blocks
 * and submit it to the UDC.
 *
 * Returns the number of blocks actually queued (limited by DMA buffer
 * availability), or 0 when no request or no buffers could be
 * allocated.
 */
static int msc_submit_write_data_req(struct msc_func *msc,
		struct block_device *bdev, uint32_t nr_blocks)
{
	struct usb_request *req;
	uint32_t blocks_queued;

	req = usb_req_alloc();
	if (!req)
		return 0;

	req->context = msc;
	req->req_done = msc_write_data_received;

	blocks_queued = msc_fill_buffer_list(&req->buf_list, bdev->block_size,
			nr_blocks);

	dbg_verbose("msc: blocks %u/%u queued for write\n", blocks_queued,
			nr_blocks);

	if (unlikely(!blocks_queued)) {
		usb_req_free(req);
		return 0;
	}

	/* Account before submitting; completion may run immediately */
	msc->blocks_queued += blocks_queued;

	atomic_inc(&msc->usb_reqs_pending);
	udc_ep_submit_out_req(msc->udc, msc->bulk_out_ep, req);

	return blocks_queued;
}
01440
01453 static void msc_write_worker(void *data)
01454 {
01455 struct msc_func *msc = data;
01456 struct block_device *bdev = msc->bdev;
01457 uint32_t blocks_remaining;
01458 uint32_t submitted;
01459
01460 while (atomic_read(&msc->usb_reqs_pending) < MSC_MAX_NR_SEGS
01461 && !msc->queue_locked) {
01462 dbg_verbose("msc: write worker: q%u <= 5%u s %d\n",
01463 msc->blocks_queued, msc->blocks_total,
01464 msc->block_req->status);
01465 assert(msc->blocks_queued <= msc->blocks_total);
01466 blocks_remaining = msc->blocks_total - msc->blocks_queued;
01467 if (!blocks_remaining)
01468 break;
01469
01470 msc->queue_locked = true;
01471 cpu_irq_enable();
01472
01473 submitted = msc_submit_write_data_req(msc, bdev,
01474 blocks_remaining);
01475
01476 cpu_irq_disable();
01477 msc->queue_locked = false;
01478
01479 if (!submitted)
01480 break;
01481 }
01482 }
01483
01484 static void msc_block_write_started(struct block_device *bdev,
01485 struct block_request *breq)
01486 {
01487 struct msc_func *msc = breq->context;
01488
01489 if (msc->blocks_queued < msc->blocks_total)
01490 msc_write_worker(msc);
01491 }
01492
/*
 * Block-layer completion callback for a WRITE transfer.
 *
 * On failure, recompute the residue from the number of blocks actually
 * written, mark the CSW failed and set MEDIUM ERROR / WRITE ERROR
 * sense data at the first bad LBA. In all cases report command
 * completion and then account for the finished OUT data stage.
 */
static void msc_block_write_done(struct block_device *bdev,
		struct block_request *breq)
{
	struct msc_func *msc = breq->context;
	uint32_t residue;

	assert(breq == msc->block_req);

	/* Residue prepared by msc_prepare_csw() for the success case */
	residue = le32_to_cpu(msc->csw->dCSWDataResidue);

	if (breq->status) {
		struct usb_msc_csw *csw = msc->csw;
		uint32_t blocks_xfered;

		blocks_xfered = blk_req_get_blocks_xfered(bdev, breq);

		dbg_warning("msc: block write failed: %d (after %u blocks)\n",
				breq->status, blocks_xfered);

		residue = le32_to_cpu(msc->cbw->dCBWDataTransferLength);
		residue -= bdev->block_size * blocks_xfered;
		csw->dCSWDataResidue = cpu_to_le32(residue);
		csw->bCSWStatus = USB_CSW_STATUS_FAIL;

		msc_init_sense(msc, SCSI_SK_MEDIUM_ERROR,
				SCSI_ASC_WRITE_ERROR,
				msc->first_lba + blocks_xfered);
	}

	msc_request_done(msc->udc, msc, residue);
	msc_request_data_done(msc->udc, msc);
}
01525
/*
 * Block-layer callback: a list of buffers has been written to the
 * medium. Free the DMA buffers and, if more data is still expected
 * from the host, kick the write worker to queue further OUT requests.
 */
static void msc_block_write_buffers_done(struct block_device *bdev,
		struct block_request *breq, struct slist *buf_list)
{
	struct msc_func *msc = breq->context;

	msc_free_dma_buf_list(buf_list);

	assert(atomic_read(&msc->blk_segs_pending) > 0);
	atomic_dec(&msc->blk_segs_pending);

	assert(msc->blocks_queued <= msc->blocks_total);
	if (msc->blocks_queued < msc->blocks_total)
		msc_write_worker(msc);
}
01540
/*
 * Bulk-OUT completion callback: write data arrived from the host.
 *
 * Detaches the filled buffers from the USB request and hands them to
 * the block layer for writing; on a USB error the whole block request
 * is aborted instead.
 */
static void msc_write_data_received(struct udc *udc, struct usb_request *req)
{
	struct slist buf_list;
	struct msc_func *msc = req->context;
	struct block_device *bdev;
	struct block_request *breq;
	int status;

	/* Salvage status and buffers before the request is recycled */
	status = req->status;
	slist_init(&buf_list);
	slist_move_to_tail(&buf_list, &req->buf_list);
	usb_req_free(req);

	bdev = msc->bdev;
	breq = msc->block_req;

	if (!status) {
		/* One USB request becomes one pending block segment */
		atomic_inc(&msc->blk_segs_pending);
		atomic_dec(&msc->usb_reqs_pending);

		if (block_submit_buf_list(bdev, breq, &buf_list)) {
			/* Submission failed: undo the accounting */
			atomic_dec(&msc->blk_segs_pending);
			msc_free_dma_buf_list(&buf_list);
		}
	} else {
		block_abort_req(bdev, breq);
	}
}
01569
/*
 * Handle a SCSI WRITE(6)/WRITE(10) command.
 *
 * Validates the CBW against the data length implied by the CDB,
 * prepares a passing CSW, queues a block-layer write and submits the
 * first bulk-OUT data request. Incoming data is fed to the block
 * layer by msc_write_data_received(); msc_write_worker() keeps the
 * pipeline full.
 */
static void msc_do_write(struct msc_func *msc, struct udc *udc,
		struct usb_msc_cbw *cbw, uint32_t lba, uint32_t nr_blocks)
{
	struct block_device *bdev = msc->bdev;
	struct block_request *breq;
	long residue;
	uint32_t cdb_data_len;
	uint32_t blocks_queued;
	unsigned long iflags;

	dbg_verbose("msc WRITE(x) %u blocks, LBA %u\n", nr_blocks, lba);

	assert(!msc->xfer_in_progress);

	/*
	 * Number of bytes the CDB asks for; compared against the CBW
	 * transfer length by msc_validate_req() below.
	 */
	cdb_data_len = nr_blocks * bdev->block_size;

	/* Last argument 0: OUT direction (cf. USB_CBW_DIRECTION_IN) */
	residue = msc_validate_req(msc, cbw, cdb_data_len, 0);
	if (unlikely(residue < 0))
		return;

	/* not_ready and xfer_in_progress are checked/set atomically */
	iflags = cpu_irq_save();
	if (msc->not_ready) {
		cpu_irq_restore(iflags);
		msc_request_failed(msc,
				le32_to_cpu(cbw->dCBWDataTransferLength),
				USB_CSW_STATUS_FAIL,
				SCSI_SK_NOT_READY, msc->busy_asc);
		return;
	}

	msc->xfer_in_progress = true;
	cpu_irq_restore(iflags);

	msc_prepare_csw(msc, residue, USB_CSW_STATUS_PASS);

	/* A zero-block write completes immediately with no data stage */
	if (unlikely(nr_blocks == 0)) {
		msc_request_done_nodata(udc, msc, residue);
		return;
	}

	msc->blocks_total = nr_blocks;
	msc->blocks_queued = 0;
	/* Keep the worker out until the first request is queued */
	msc->queue_locked = true;
	atomic_write(&msc->blk_segs_pending, 0);
	atomic_write(&msc->usb_reqs_pending, 0);

	breq = msc->block_req;
	breq->req_started = msc_block_write_started;
	breq->req_done = msc_block_write_done;
	breq->buf_list_done = msc_block_write_buffers_done;
	breq->context = msc;
	block_queue_req(bdev, breq, lba, nr_blocks, BLK_OP_WRITE);

	blocks_queued = msc_submit_write_data_req(msc, bdev, nr_blocks);
	if (blocks_queued == 0) {
		block_abort_req(bdev, breq);
		msc_out_of_memory(msc);
	}
	msc->queue_locked = false;
}
01635
01636 static void msc_verify_read(struct msc_func *msc, struct block_device *bdev,
01637 uint32_t first_lba, uint32_t nr_blocks);
01638 static void msc_verify_bytchk(struct msc_func *msc, struct block_device *bdev,
01639 uint32_t first_lba, uint32_t nr_blocks);
01640
/*
 * VERIFY with BYTCHK=1 would require comparing host-supplied data
 * against the medium; this is not supported, so fail the command with
 * ILLEGAL REQUEST / INVALID FIELD IN CDB.
 */
static void msc_verify_bytchk(struct msc_func *msc, struct block_device *bdev,
		uint32_t first_lba, uint32_t nr_blocks)
{
	msc_request_failed(msc, le32_to_cpu(msc->csw->dCSWDataResidue),
			USB_CSW_STATUS_FAIL, SCSI_SK_ILLEGAL_REQUEST,
			SCSI_ASC_INVALID_FIELD_IN_CDB);
}
01648
/*
 * Completion of a VERIFY-driven read. On error, fail the CSW and set
 * MEDIUM ERROR sense data at the first unreadable LBA; in all cases
 * finish the (data-less) command with the residue prepared earlier.
 */
static void msc_verify_read_done(struct block_device *bdev,
		struct block_request *breq)
{
	struct msc_func *msc = breq->context;

	assert(breq == msc->block_req);

	if (breq->status) {
		uint32_t blocks_xfered;

		blocks_xfered = blk_req_get_blocks_xfered(bdev, breq);

		msc->csw->bCSWStatus = USB_CSW_STATUS_FAIL;
		msc_init_sense(msc, SCSI_SK_MEDIUM_ERROR,
				SCSI_ASC_UNRECOVERED_READ_ERROR,
				msc->first_lba + blocks_xfered);
	}

	msc_request_done_nodata(msc->udc, msc,
			le32_to_cpu(msc->csw->dCSWDataResidue));
}
01670
/*
 * Recycle buffers for a VERIFY-driven read.
 *
 * The data is only read to check readability and never sent to the
 * host, so the just-filled buffers are resubmitted to the block layer
 * until blocks_total blocks have been covered. The final buffer is
 * shrunk so the read does not extend past the requested range.
 */
static void msc_verify_read_buffers_done(struct block_device *bdev,
		struct block_request *breq, struct slist *buf_list)
{
	struct slist new_buf_list;
	struct msc_func *msc = breq->context;
	unsigned int block_size;
	uint32_t blocks_queued;
	uint32_t blocks_total;
	unsigned int blocks_per_buf;

	blocks_total = msc->blocks_total;
	blocks_queued = msc->blocks_queued;
	block_size = bdev->block_size;
	blocks_per_buf = MSC_DATA_BUFFER_SIZE / block_size;
	slist_init(&new_buf_list);

	/* Move as many buffers as still needed onto the resubmit list */
	while (!slist_is_empty(buf_list)) {
		struct buffer *buf;

		assert(blocks_queued <= blocks_total);
		if (blocks_queued == blocks_total)
			break;

		buf = slist_pop_head(buf_list, struct buffer, node);
		blocks_queued += blocks_per_buf;

		/* Trim the last buffer to the end of the verify range */
		if (blocks_queued > blocks_total) {
			blocks_per_buf -= blocks_queued - blocks_total;
			buffer_resize(buf, block_size * blocks_per_buf);
			blocks_queued = blocks_total;
		}
		slist_insert_tail(&new_buf_list, &buf->node);
	}

	if (!slist_is_empty(&new_buf_list)) {
		if (block_submit_buf_list(bdev, breq, &new_buf_list))
			msc_free_dma_buf_list(&new_buf_list);
		else
			msc->blocks_queued = blocks_queued;
	}

	/* Any buffers not resubmitted above are no longer needed */
	msc_free_dma_buf_list(buf_list);
}
01715
01716 static void msc_verify_read(struct msc_func *msc, struct block_device *bdev,
01717 uint32_t first_lba, uint32_t nr_blocks)
01718 {
01719 struct slist buf_list;
01720 struct block_request *breq;
01721 unsigned int block_size;
01722 uint32_t blocks_queued;
01723
01724
01725
01726
01727
01728 msc->blocks_total = nr_blocks;
01729 breq = msc->block_req;
01730 breq->req_started = NULL;
01731 breq->req_done = msc_verify_read_done;
01732 breq->buf_list_done = msc_verify_read_buffers_done;
01733 breq->context = msc;
01734 block_queue_req(bdev, breq, first_lba, nr_blocks, BLK_OP_READ);
01735
01736 block_size = bdev->block_size;
01737 slist_init(&buf_list);
01738
01739 blocks_queued = msc_fill_buffer_list(&buf_list, bdev->block_size,
01740 nr_blocks);
01741
01742 if (unlikely(blocks_queued == 0)) {
01743 block_abort_req(bdev, breq);
01744 msc_out_of_memory(msc);
01745 return;
01746 }
01747
01748 msc->blocks_queued = blocks_queued;
01749 if (block_submit_buf_list(bdev, breq, &buf_list)) {
01750 block_abort_req(bdev, breq);
01751 msc_free_dma_buf_list(&buf_list);
01752 msc_out_of_memory(msc);
01753 }
01754 }
01755
/*
 * Handle a SCSI VERIFY(10) command.
 *
 * With BYTCHK=0, blocks are read from the medium and discarded to
 * check readability; with BYTCHK=1 (byte compare) the command is
 * failed as unsupported. A zero-block verify completes immediately.
 */
static void msc_do_verify(struct msc_func *msc, struct udc *udc,
		struct usb_msc_cbw *cbw, uint32_t lba, uint32_t nr_blocks,
		bool bytchk)
{
	struct block_device *bdev = msc->bdev;
	long residue;
	uint32_t cdb_data_len = 0;
	unsigned long iflags;

	dbg_verbose("msc VERIFY(x) %u blocks, LBA %u\n", nr_blocks, lba);

	/* Only BYTCHK=1 implies a host data stage */
	if (bytchk)
		cdb_data_len = nr_blocks * bdev->block_size;

	residue = msc_validate_req(msc, cbw, cdb_data_len, 0);
	if (unlikely(residue < 0))
		return;

	/* not_ready and xfer_in_progress are checked/set atomically */
	iflags = cpu_irq_save();
	if (msc->not_ready) {
		cpu_irq_restore(iflags);
		msc_request_failed(msc,
				le32_to_cpu(cbw->dCBWDataTransferLength),
				USB_CSW_STATUS_FAIL,
				SCSI_SK_NOT_READY, msc->busy_asc);
		return;
	}

	msc->xfer_in_progress = true;
	cpu_irq_restore(iflags);

	msc_prepare_csw(msc, residue, USB_CSW_STATUS_PASS);

	if (unlikely(nr_blocks == 0)) {
		/* Nothing to verify */
		msc_request_done_nodata(udc, msc, residue);
		return;
	}

	if (bytchk)
		msc_verify_bytchk(msc, bdev, lba, nr_blocks);
	else
		msc_verify_read(msc, bdev, lba, nr_blocks);
}
01801
/*
 * Bulk-OUT completion for the 31-byte Command Block Wrapper.
 *
 * Validates the CBW signature and length — wedging both bulk
 * endpoints on an invalid CBW so they stay stalled until a Bulk-Only
 * Mass Storage Reset — then dispatches on the SCSI opcode embedded in
 * the CDB.
 */
static void msc_cbw_received(struct udc *udc, struct usb_request *req)
{
	struct msc_func *msc = req->context;
	struct usb_msc_cbw *cbw;
	uint8_t opcode;

	dbg_verbose("cbw received: status %d len %zu\n",
			req->status, req->bytes_xfered);

	cbw = msc->cbw;
	assert(req == msc->cbw_csw_req);
	assert(cbw == usb_req_get_first_buffer(req)->addr.ptr);

	/* Ignore failed transfers */
	if (req->status)
		return;

	/* A valid CBW is exactly 31 bytes and carries the signature */
	if (cbw->dCBWSignature != LE32(USB_CBW_SIGNATURE)
			|| req->bytes_xfered != 31) {
		/*
		 * Invalid CBW: wedge both pipes so they remain stalled
		 * until the host issues a Bulk-Only Mass Storage Reset.
		 */
		udc_ep_set_wedge(udc, msc->bulk_in_ep);
		udc_ep_set_wedge(udc, msc->bulk_out_ep);
		return;
	}

	opcode = scsi_cdb_get_opcode(cbw->CDB);

	/* Dispatch the SCSI command embedded in the CBW */
	switch (opcode) {
	case SCSI_CMD_TEST_UNIT_READY:
		msc_test_unit_ready(msc, udc,
				le32_to_cpu(cbw->dCBWDataTransferLength));
		break;

	case SCSI_CMD_REQUEST_SENSE:
		msc_request_sense(msc, udc, cbw);
		break;

	case SCSI_CMD_READ6:
		msc_do_read(msc, udc, cbw, scsi_cdb_get_lba(cbw->CDB, 6),
				scsi_cdb_get_xfer_len(cbw->CDB, 6));
		break;

	case SCSI_CMD_WRITE6:
		msc_do_write(msc, udc, cbw, scsi_cdb_get_lba(cbw->CDB, 6),
				scsi_cdb_get_xfer_len(cbw->CDB, 6));
		break;

	case SCSI_CMD_INQUIRY:
		msc_inquiry(msc, udc, cbw);
		break;

	case SCSI_CMD_MODE_SENSE6:
		msc_mode_sense(msc, udc, cbw,
			scsi_cdb_get_alloc_len(cbw->CDB, SCSI_CMD_MODE_SENSE6));
		break;

	case SCSI_CMD_READ_CAPACITY10:
		msc_read_capacity(msc, udc, cbw);
		break;

	case SCSI_CMD_READ10:
		msc_do_read(msc, udc, cbw, scsi_cdb_get_lba(cbw->CDB, 10),
				scsi_cdb_get_xfer_len(cbw->CDB, 10));
		break;

	case SCSI_CMD_WRITE10:
		msc_do_write(msc, udc, cbw, scsi_cdb_get_lba(cbw->CDB, 10),
				scsi_cdb_get_xfer_len(cbw->CDB, 10));
		break;

	case SCSI_CMD_VERIFY10:
		msc_do_verify(msc, udc, cbw, scsi_cdb_get_lba(cbw->CDB, 10),
				scsi_cdb_get_xfer_len(cbw->CDB, 10),
				scsi_cdb_bytchk_is_set(cbw->CDB, 10));
		break;

	case SCSI_CMD_MODE_SENSE10:
		msc_mode_sense(msc, udc, cbw,
				scsi_cdb_get_alloc_len(cbw->CDB,
					SCSI_CMD_MODE_SENSE10));
		break;

	default:
		dbg_verbose("MSC: Unhandled opcode %02x\n", opcode);
		/* Unknown command: fail with INVALID COMMAND sense */
		msc_request_failed(msc,
				le32_to_cpu(cbw->dCBWDataTransferLength),
				USB_CSW_STATUS_FAIL,
				SCSI_SK_ILLEGAL_REQUEST,
				SCSI_ASC_INVALID_COMMAND_OPERATION_CODE);
		break;
	}
}
01900
/*
 * Enable the MSC data interface.
 *
 * Creates the bulk endpoint pair matching the negotiated speed,
 * allocates the block request, CSW, CBW and the shared CBW/CSW USB
 * request, then arms reception of the first CBW. Returns 0 on
 * success, -1 on failure (endpoints are cleaned up).
 */
static int msc_iface_enable(struct udc *udc, struct usb_func_iface *iface)
{
	const struct msc_bulk_iface_block *desc;
	struct msc_func *msc = msc_func_of(iface);
	struct usb_msc_cbw *cbw;
	struct usb_request *req;
	struct buffer *buf;
	phys_addr_t phys;

	msc_queue_empty(msc);

	/* Pick the descriptor set for the current connection speed */
	switch (udc->speed) {
	case USB_SPEED_FULL:
		desc = &msc_bulk_fs_iface;
		break;
#ifdef CONFIG_UDC_HIGH_SPEED
	case USB_SPEED_HIGH:
		desc = &msc_bulk_hs_iface;
		break;
#endif
	default:
		return -1;
	}

	msc->udc = udc;

	msc->bulk_in_ep = udc_ep_create(udc, &desc->in_ep,
			CONFIG_MSC_BULK_NR_BANKS);
	msc->bulk_out_ep = udc_ep_create(udc, &desc->out_ep,
			CONFIG_MSC_BULK_NR_BANKS);
	if (msc->bulk_in_ep < 0 || msc->bulk_out_ep < 0)
		goto fail;

	/* Allocation failures below are treated as fatal */
	msc->block_req = block_alloc_request(msc->bdev);
	assert(msc->block_req);

	msc->csw = msc_alloc_csw(msc, &phys);
	assert(msc->csw);

	cbw = msc_alloc_cbw(msc, &phys);
	assert(cbw);
	msc->cbw = cbw;

	req = usb_req_alloc();
	assert(req);
	req->req_done = msc_cbw_received;
	req->context = msc;
	msc->cbw_csw_req = req;

	buf = buffer_alloc();
	assert(buf);
	buffer_init_rx(buf, cbw, sizeof(*cbw));
	usb_req_add_buffer(req, buf);

	/* Arm reception of the first CBW */
	udc_ep_submit_out_req(udc, msc->bulk_out_ep, req);

	return 0;

fail:
	/* Clear the ids before destroying so callbacks see them gone */
	if (msc->bulk_in_ep > 0) {
		usb_ep_id_t ep = msc->bulk_in_ep;
		msc->bulk_in_ep = 0;
		udc_ep_destroy(udc, ep);
	}
	if (msc->bulk_out_ep > 0) {
		usb_ep_id_t ep = msc->bulk_out_ep;
		msc->bulk_out_ep = 0;
		udc_ep_destroy(udc, ep);
	}

	return -1;
}
01973
01974 static void msc_iface_disable(struct udc *udc, struct usb_func_iface *iface)
01975 {
01976 struct msc_func *msc = msc_func_of(iface);
01977 usb_ep_id_t in, out;
01978
01979 msc_queue_empty(msc);
01980
01981 in = msc->bulk_in_ep;
01982 msc->bulk_in_ep = 0;
01983 out = msc->bulk_out_ep;
01984 msc->bulk_out_ep = 0;
01985
01986 if (in > 0)
01987 udc_ep_destroy(udc, in);
01988 if (out > 0)
01989 udc_ep_destroy(udc, out);
01990
01991 msc_free_cbw(msc, msc->cbw);
01992 msc_free_csw(msc, msc->csw);
01993 buffer_free(usb_req_get_first_buffer(msc->cbw_csw_req));
01994 usb_req_free(msc->cbw_csw_req);
01995 block_free_request(msc->bdev, msc->block_req);
01996 }
01997
/*
 * Handle the class-specific Bulk-Only Mass Storage Reset request.
 *
 * Flushes and un-wedges both bulk endpoints, drops all queued work,
 * and re-arms the shared request to receive the next CBW. Always
 * returns 0.
 */
static int msc_bulk_reset(struct udc *udc, struct msc_func *msc)
{
	struct usb_request *req;
	struct buffer *buf;

	dbg_info("MSC Bulk Reset\n");

	/*
	 * Clear any stall/wedge condition (e.g. set after an invalid
	 * CBW) so the host can resume bulk traffic.
	 */
	if (msc->bulk_in_ep > 0) {
		udc_ep_flush(udc, msc->bulk_in_ep);
		udc_ep_clear_wedge(udc, msc->bulk_in_ep);
	}
	if (msc->bulk_out_ep > 0) {
		udc_ep_flush(udc, msc->bulk_out_ep);
		udc_ep_clear_wedge(udc, msc->bulk_out_ep);
	}

	msc_queue_empty(msc);

	/* Reinitialize the shared CBW/CSW request for CBW reception */
	req = msc->cbw_csw_req;
	buf = usb_req_get_first_buffer(req);
	usb_req_init(req);

	buffer_init_rx(buf, msc->cbw, sizeof(*msc->cbw));
	usb_req_add_buffer(req, buf);
	req->req_done = msc_cbw_received;

	udc_ep_submit_out_req(udc, msc->bulk_out_ep, req);

	return 0;
}
02035
02036 static int msc_iface_setup(struct udc *udc, struct usb_func_iface *iface,
02037 struct usb_setup_req *req)
02038 {
02039 uint16_t value = le16_to_cpu(req->wValue);
02040 uint16_t len = le16_to_cpu(req->wLength);
02041 uint8_t byte;
02042
02043 if (usb_req_type(req) != USB_REQTYPE_CLASS)
02044 return -1;
02045
02046 switch (req->bRequest) {
02047 case USB_MSC_REQ_BULK_RESET:
02048 if (len || value || usb_req_is_in(req))
02049 return -1;
02050
02051 if (msc_bulk_reset(udc, msc_func_of(iface)))
02052 return -1;
02053
02054 udc_ep0_send_status(udc);
02055 break;
02056
02057 case USB_MSC_REQ_GET_MAX_LUN:
02058 if (len != 1 || value || usb_req_is_out(req))
02059 return -1;
02060
02061
02062 byte = 0;
02063 udc_ep0_write_sync(udc, &byte, sizeof(byte));
02064 udc_ep0_expect_status(udc);
02065 break;
02066
02067 default:
02068 return -1;
02069 }
02070
02071 return 0;
02072 }
02073
/* Interface operations hooked into the USB function core */
static const struct usb_func_iface_ops msc_iface_ops = {
	.enable = msc_iface_enable,
	.disable = msc_iface_disable,
	.setup = msc_iface_setup,
};
02079
/*
 * Mark the logical unit as temporarily not ready.
 *
 * @asc is reported as the additional sense code for subsequent
 * commands failed with NOT READY. @queue_empty is invoked once no
 * transfer is in flight: immediately if the function is idle,
 * otherwise deferred via busy_cb until the current transfer winds
 * down.
 */
void usb_msc_set_busy(struct usb_func_iface *iface, uint16_t asc,
		void (*queue_empty)(void *data), void *data)
{
	struct msc_func *msc = msc_func_of(iface);
	unsigned long iflags;

	/* Flag update and in-progress check must be atomic */
	iflags = cpu_irq_save();
	msc->not_ready = true;
	msc->busy_asc = asc;
	dbg_verbose("msc_set_busy: ASC(Q) %04x in_progress: %d\n",
			asc, msc->xfer_in_progress);
	if (msc->xfer_in_progress) {
		/* Defer the callback until the transfer completes */
		msc->busy_cb = queue_empty;
		msc->busy_cb_data = data;
		cpu_irq_restore(iflags);
	} else {
		cpu_irq_restore(iflags);
		queue_empty(data);
	}
}
02100
/*
 * Mark the logical unit ready again after usb_msc_set_busy(): media
 * access commands will no longer be failed with NOT READY.
 */
void usb_msc_set_ready(struct usb_func_iface *iface)
{
	struct msc_func *msc = msc_func_of(iface);

	msc->not_ready = false;
}
02107
/* The single MSC function instance, with one alternate setting */
static struct msc_func msc_func = {
	.iface.nr_settings = 1,
	.iface.setting[0] = {
		.ops = &msc_iface_ops,
		.fs_desc = &msc_bulk_fs_iface.iface,
		.fs_desc_size = sizeof(msc_bulk_fs_iface),
#ifdef CONFIG_UDC_HIGH_SPEED
		.hs_desc = &msc_bulk_hs_iface.iface,
		.hs_desc_size = sizeof(msc_bulk_hs_iface),
#endif
	},
};
02120
02121 struct usb_func_iface *usb_msc_func_init(struct block_device *bdev)
02122 {
02123 struct msc_func *msc = &msc_func;
02124
02125 msc->bdev = bdev;
02126
02127 build_assert(CONFIG_DMAPOOL_SMALL_OBJ_SIZE % 4 == 0);
02128 build_assert(MSC_DATA_BUFFER_SIZE % 512 == 0);
02129
02130 msc->sense_data = dma_alloc(&msc->sense_data_phys, 32);
02131 msc_init_sense(msc, SCSI_SK_NO_SENSE,
02132 SCSI_ASC_NO_ADDITIONAL_SENSE_INFO, 0);
02133
02134 return &msc->iface;
02135 }