1 /* ----------------------------------------------------------------------------
2  * Copyright (c) Huawei Technologies Co., Ltd. 2015-2019. All rights reserved.
3  * Description: LiteOS USB Driver Mass Storage Protocol
4  * Author: huangjieliang
5  * Create: 2015-07-30
6  * Redistribution and use in source and binary forms, with or without modification,
7  * are permitted provided that the following conditions are met:
8  * 1. Redistributions of source code must retain the above copyright notice, this list of
9  * conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright notice, this list
11  * of conditions and the following disclaimer in the documentation and/or other materials
12  * provided with the distribution.
13  * 3. Neither the name of the copyright holder nor the names of its contributors may be used
14  * to endorse or promote products derived from this software without specific prior written
15  * permission.
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
18  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  * --------------------------------------------------------------------------- */
28 /* ----------------------------------------------------------------------------
29  * Notice of Export Control Law
30  * ===============================================
31  * Huawei LiteOS may be subject to applicable export control laws and regulations, which might
32  * include those applicable to Huawei LiteOS of U.S. and the country in which you are located.
33  * Import, export and usage of Huawei LiteOS in any manner by you shall be in compliance with such
34  * applicable export control laws and regulations.
35  * --------------------------------------------------------------------------- */
36 
37 #include <linux/wait.h>
38 #include "gadget/f_mass_storage.h"
39 #include "implementation/global_implementation.h"
40 
41 #ifdef __cplusplus
42 #if __cplusplus
43 extern "C" {
44 #endif /* __cplusplus */
45 #endif /* __cplusplus */
46 
47 #undef USB_DEBUG_VAR
48 #define USB_DEBUG_VAR   g_fmass_debug
49 #ifdef LOSCFG_USB_DEBUG
50 static int g_fmass_debug = 0;
51 
52 void fmass_debug_func(int level)
53 {
54   g_fmass_debug = level;
55   PRINTK("The level of usb fmass debug is %d\n", level);
56 }
57 DEBUG_MODULE(fmass, fmass_debug_func);
58 #endif
59 
60 extern void __init_waitqueue_head(wait_queue_head_t *wait);
61 extern void __wake_up_interruptible(wait_queue_head_t *wait);
62 
63 int usbdev_mass_initialize(struct module *mod, int n, void *arg);
64 
65 static devclass_t g_fmass_devclass;
66 static struct mass_softc *g_fmass = NULL;
67 
68 static const driver_t g_fmass_driver =
69 {
70   .name    = "fmass",
71   .methods = NULL,
72   .size    = sizeof(struct mass_softc),
73 };
74 
75 DRIVER_MODULE(fmass, simple, g_fmass_driver, g_fmass_devclass, usbdev_mass_initialize, 0);
76 
77 static int usbclass_mass_bind(struct usbdevclass_driver_s *driver, struct usbdev_s *dev);
78 static int usbclass_mass_unbind(struct usbdevclass_driver_s *driver, struct usbdev_s *dev);
79 static int usbclass_mass_setup(struct usbdevclass_driver_s *driver, struct usbdev_s *dev,
80                                const struct usb_device_request *ctrl, uint8_t *dataout, size_t outlen);
81 static void usbclass_mass_disconnect(struct usbdevclass_driver_s *driver, struct usbdev_s *dev);
82 
83 /* USB driver operations */
84 
85 static const struct usbdevclass_driverops_s g_mass_driverops =
86 {
87   usbclass_mass_bind,
88   usbclass_mass_unbind,
89   usbclass_mass_setup,
90   usbclass_mass_disconnect,
91   NULL,
92   NULL
93 };
94 
95 static int fmass_thread_init(struct mass_dev_s *fmass, struct usbdev_s *dev);
96 static void fmass_main_thread_signal(struct mass_dev_s *fmass, fmass_task_state state);
97 static void fmass_task_change_state(struct mass_dev_s *fmass, fmass_task_state new_state);
98 
99 static int fmass_dev_open(struct mass_dev_s *fmass);
100 static int fmass_dev_close(struct mass_dev_s *fmass);
101 static void fmass_dev_capacity(struct mass_dev_s *fmass);
102 
103 static size_t fmass_dev_read(struct mass_dev_s *fmass,
104                              uint8_t *buffer,
105                              size_t stsector,
106                              uint32_t nsectors);
107 
108 static size_t fmass_dev_write(struct mass_dev_s *fmass,
109                               uint8_t *buffer,
110                               size_t stsector,
111                               uint32_t nsectors);
112 
113 static void fmass_report_usb_status(struct mass_dev_s *fmass);
114 static void fmass_notify_report(struct mass_dev_s *fmass, int status);
115 
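/*
 * The two macros below adapt the Linux-style wait queue in struct mass_dev_s:
 * fmass_wait_intr() blocks the mass-storage worker thread on xfer_wait until
 * the given condition becomes true or the tick timeout expires, and
 * fmass_wakeup() is called from the endpoint completion callbacks to wake it.
 * A sketch of the intended usage (condition and timeout are illustrative):
 *
 *   if (!fmass_wait_intr(fmass, taskstate_match(fmass, FMASS_TASK_DONE_DATA_PHASE), 2 * HZ))
 *     {
 *       // timed out: no completion callback fired within the timeout
 *     }
 */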
116 #define fmass_wait_intr(fm, cond, tm)    \
117   wait_event_interruptible_timeout((((struct mass_dev_s *)fm)->xfer_wait), cond, tm)
118 
119 #define fmass_wakeup(fm)    \
120   wake_up_interruptible(&(((struct mass_dev_s *)fm)->xfer_wait))
121 
122 static const char *g_mass_device_type[] =
123 {
124   "/dev/mmcblk0p",
125   "/dev/mmcblk1p",
126   "/dev/uramp",
127 };
128 
129 struct fmass_notify_cb
130 {
131   struct fmass_notify notify[MAX_NOFIFY_NUM];
132   struct mtx notify_mtx;
133   bool init;
134 } g_notify = { .notify_mtx = PTHREAD_MUTEX_INITIALIZER, .init = false };
135 
136 static inline void fmass_notify_init(void)
137 {
138   mtx_lock(&g_notify.notify_mtx);
139   if (g_notify.init == false)
140     {
141       g_notify.init = true;
142       (void)memset_s(g_notify.notify, sizeof(struct fmass_notify) * MAX_NOFIFY_NUM,
143                      0, sizeof(struct fmass_notify) * MAX_NOFIFY_NUM);
144     }
145   mtx_unlock(&g_notify.notify_mtx);
146 }
147 
148 #ifdef LOSCFG_DRIVERS_USB3_DEVICE_CONTROLLER
149 #define FMASS_MAX_PACKET_SIZE   0x0400
150 #else
151 #define FMASS_MAX_PACKET_SIZE   0x0200
152 #endif
153 
154 #define DEIVICE_VENDOR_ID     0x0525
155 #define DEIVICE_PRODUCT_ID    0xa4a5
156 #define DEIVICE_VERSION       0x0100
157 
158 static const struct usb_device_descriptor g_fmass_device_desc =
159 {
160   .bLength            = sizeof(struct usb_device_descriptor),
161   .bDescriptorType    = UDESC_DEVICE,
162   HSETW(.bcdUSB, UD_BCD_USB), /* USB version 0x0200 */
163   .bDeviceClass       = UDCLASS_IN_INTERFACE,
164   .bDeviceSubClass    = 0,
165   .bDeviceProtocol    = 0,
166   .bMaxPacketSize     = UD_USB_MPS,
167   HSETW(.idVendor, DEIVICE_VENDOR_ID),   /* vendor */
168   HSETW(.idProduct, DEIVICE_PRODUCT_ID), /* product */
169   HSETW(.bcdDevice, 0x0318),             /* device version */
170   .iManufacturer      = 0,
171   .iProduct           = 0,
172   .iSerialNumber      = 0,
173   .bNumConfigurations = 1,
174 };
175 
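/*
 * Configuration descriptor block returned for GET_DESCRIPTOR(CONFIG):
 * one configuration, one interface (Mass Storage class, SCSI transparent
 * command set, Bulk-Only Transport) and a bulk IN plus a bulk OUT endpoint.
 * When the USB 3.0 device controller is enabled, each bulk endpoint is
 * followed by a SuperSpeed endpoint companion descriptor (type 0x30).
 */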
176 struct fmass_config_desc
177 {
178   struct usb_config_descriptor confd;
179   struct usb_interface_descriptor ifcd;
180   struct usb_endpoint_descriptor iepd;
181 #ifdef LOSCFG_DRIVERS_USB3_DEVICE_CONTROLLER
182   struct usb_endpoint_ss_comp_descriptor icompd;
183 #endif
184   struct usb_endpoint_descriptor oepd;
185 #ifdef LOSCFG_DRIVERS_USB3_DEVICE_CONTROLLER
186   struct usb_endpoint_ss_comp_descriptor ocompd;
187 #endif
188 } __packed;
189 
190 static struct fmass_config_desc g_fmass_confd =
191 {
192   .confd =
193     {
194       .bLength             = sizeof(struct usb_config_descriptor),
195       .bDescriptorType     = UDESC_CONFIG,
196       HSETW(.wTotalLength, sizeof(g_fmass_confd)),
197       .bNumInterface       = 1,
198       .bConfigurationValue = 1,
199       .iConfiguration      = 0,
200       .bmAttributes        = UC_SELF_POWERED | UC_BUS_POWERED,
201       .bMaxPower           = 1 /* max power */
202     },
203   .ifcd =
204     {
205       .bLength             = sizeof(struct usb_interface_descriptor),
206       .bDescriptorType     = UDESC_INTERFACE,
207       .bInterfaceNumber    = 0,
208       .bAlternateSetting   = 0,
209       .bNumEndpoints       = 2,
210       .bInterfaceClass     = UICLASS_MASS,
211       .bInterfaceSubClass  = UISUBCLASS_SCSI,
212       .bInterfaceProtocol  = UIPROTO_MASS_BULK,
213       .iInterface          = 1
214     },
215   .iepd =
216     {
217       .bLength             = sizeof(struct usb_endpoint_descriptor),
218       .bDescriptorType     = UDESC_ENDPOINT,
219       .bEndpointAddress    = UE_DIR_IN,
220       .bmAttributes        = UE_BULK,
221       HSETW(.wMaxPacketSize, FMASS_MAX_PACKET_SIZE),
222       .bInterval           = 0
223     },
224 #ifdef LOSCFG_DRIVERS_USB3_DEVICE_CONTROLLER
225   .icompd =
226     {
227       .bLength             = 6,
228       .bDescriptorType     = 0x30,
229       .bMaxBurst           = 0xf,
230       .bmAttributes        = 0,
231       .wBytesPerInterval   = {0}
232     },
233 #endif
234   .oepd =
235     {
236       .bLength             = sizeof(struct usb_endpoint_descriptor),
237       .bDescriptorType     = UDESC_ENDPOINT,
238       .bEndpointAddress    = UE_DIR_OUT,
239       .bmAttributes        = UE_BULK,
240       HSETW(.wMaxPacketSize, FMASS_MAX_PACKET_SIZE),
241       .bInterval           = 0
242     },
243 #ifdef LOSCFG_DRIVERS_USB3_DEVICE_CONTROLLER
244   .ocompd =
245     {
246       .bLength             = 6,
247       .bDescriptorType     = 0x30,
248       .bMaxBurst           = 0xf,
249       .bmAttributes        = 0,
250       .wBytesPerInterval   = {0}
251     },
252 #endif
253 };
254 
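/*
 * USB string descriptors, encoded as UTF-16LE with a two-byte header
 * (bLength, bDescriptorType). g_dt_string_id carries the language id
 * 0x0409 (US English); the remaining entries supply the vendor, product,
 * serial number and "Mass Storage Gadget" strings listed in
 * g_fmass_device_strings at the end of this file.
 */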
255 #define DT_STRING_ID_LEN   4
256 static const char g_dt_string_id[DT_STRING_ID_LEN] =
257 {
258   DT_STRING_ID_LEN,
259   UDESC_STRING,
260   0x09, 0x04,
261 };
262 
263 #define DT_STRING_VID_LEN   16
264 static const char g_dt_string_vid[DT_STRING_VID_LEN] =
265 {
266   DT_STRING_VID_LEN,
267   UDESC_STRING,
268   'D', 0, 'W', 0, 'C', 0, '-', 0, 'O', 0, 'T', 0, 'G', 0
269 };
270 
271 #define DT_STRING_PID_LEN   16
272 static const char g_dt_string_pid[DT_STRING_PID_LEN] =
273 {
274   DT_STRING_PID_LEN,
275   UDESC_STRING,
276   'M', 0, 8, 0, '-', 0, 'C', 0, 'H', 0, 'I', 0, 'P', 0
277 
278 };
279 
280 #define DT_STRING_SERIAL_LEN   18
281 static const char g_dt_string_serial[DT_STRING_SERIAL_LEN] =
282 {
283   DT_STRING_SERIAL_LEN,
284   UDESC_STRING,
285   '2', 0, '0', 0, '1', 0, '5', 0, '0', 0, '7', 0, '3', 0, '0', 0
286 };
287 
288 static char g_dt_string_buf[40] =
289 {
290   0x28, 3, 0x4d, 0, 0x61, 0, 0x73, 0, 0x73, 0,
291   0x20, 0, 0x53, 0, 0x74, 0, 0x6f, 0, 0x72, 0,
292   0x61, 0, 0x67, 0, 0x65, 0, 0x20, 0, 0x47, 0,
293   0x61, 0, 0x64, 0, 0x67, 0, 0x65, 0, 0x74, 0
294 };
295 
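/*
 * fmass->task_state is a bitmask of fmass_task_state flags; these helpers
 * test whether a given phase flag is currently set (or clear) so the worker
 * thread and the endpoint callbacks agree on which Bulk-Only Transport
 * phase (command, data, status) is in progress.
 */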
296 inline static bool taskstate_match(struct mass_dev_s *fmass, fmass_task_state tarstate)
297 {
298   return fmass->task_state & tarstate;
299 }
300 
301 inline static bool taskstate_unmatch(struct mass_dev_s *fmass, fmass_task_state tarstate)
302 {
303   return (!(fmass->task_state & tarstate));
304 }
305 
306 static int fmass_device_status(struct mass_dev_s *fmass)
307 {
308   return fmass->dev_status;
309 }
310 
311 static void fmass_handle_bulkin_callback(struct usbdev_ep_s *ep,
312                                          struct usbdev_req_s *req)
313 {
314   struct mass_dev_s *fmass = (struct mass_dev_s *)(ep->priv);
315   if (taskstate_match(fmass, FMASS_TASK_REQ_DATA_PHASE))
316     {
317       fmass_task_change_state(fmass, FMASS_TASK_DONE_DATA_PHASE);
318     }
319   else if (taskstate_match(fmass, FMASS_TASK_REQ_STATUS_PHASE))
320     {
321       fmass_task_change_state(fmass, FMASS_TASK_DONE_STATUS_PHASE);
322     }
323   fmass_wakeup(fmass);
324 }
325 
326 static void fmass_handle_bulkout_callback(struct usbdev_ep_s *ep,
327                                           struct usbdev_req_s *req)
328 {
329   struct mass_dev_s *fmass = (struct mass_dev_s *)(ep->priv);
330   if (taskstate_match(fmass, FMASS_TASK_REQ_COMMAND_PHASE))
331     {
332       fmass_task_change_state(fmass, FMASS_TASK_DONE_COMMAND_PHASE);
333     }
334   else if (taskstate_match(fmass, FMASS_TASK_REQ_DATA_PHASE))
335     {
336       fmass_task_change_state(fmass, FMASS_TASK_DONE_DATA_PHASE);
337     }
338   fmass_wakeup(fmass);
339 }
340 
341 static int fmass_bulkin_request(struct mass_dev_s *fmass,
342                                 struct usbdev_req_s *req)
343 {
344   struct usbdev_ep_s *ep = fmass->bulkin;
345   req->callback = fmass_handle_bulkin_callback;
346   return EP_SUBMIT(ep, req);
347 }
348 
349 static int fmass_bulkout_request(struct mass_dev_s *fmass,
350                                  struct usbdev_req_s *req)
351 {
352   struct usbdev_ep_s *ep = fmass->bulkout;
353   req->callback = fmass_handle_bulkout_callback;
354   return EP_SUBMIT(ep, req);
355 }
356 
357 static void report_scsi_command_error(struct mass_dev_s *fmass, uint32_t sense_data)
358 {
359   fmass->sense_data      = sense_data;
360   fmass->sense_data_info = 0;
361   fmass->info_valid      = 1;
362   fmass->csw.bCSWStatus  = BULK_CSW_STAT_FAIL;
363 }
364 
365 int done_scsi_test_unit_ready(struct mass_dev_s *fmass)
366 {
367   if (fmass_device_status(fmass))
368     {
369       if (fmass->nluns == 0)
370         {
371           (void)fmass_dev_open(fmass);
372           fmass_dev_capacity(fmass);
373           fmass->sense_data = SCSI_NOT_READY_TO_READY_TRANSITION;
374           fmass->csw.bCSWStatus = BULK_CSW_STAT_FAIL;
375           DPRINTFN(1, "this unint is not ready\n");
376         }
377     }
378   else
379     {
380       fmass->sense_data = SCSI_MEDIUM_NOT_PRESENT;
381       fmass->csw.bCSWStatus = BULK_CSW_STAT_FAIL;
382     }
383   return 0;
384 }
385 
386 int done_scsi_request_sense(struct mass_dev_s *fmass)
387 {
388   uint8_t *buf = (uint8_t *)fmass->bulkreq.buf;
389   uint32_t sd, sdinfo;
390   uint32_t valid;
391   int ret;
392 
393   ret = memset_s(buf, (MAX_BLOCK_RW_SECTORS << 9), 0, 18);
394   if (ret != EOK)
395     {
396       return 0;
397     }
398 
399   if (!fmass->nluns && fmass_device_status(fmass)) /* Unsupported LUNs are okay */
400     {
401       sd     = SCSI_LOGICAL_UNIT_NOT_SUPPORTED;
402       sdinfo = 0;
403       valid  = 0;
404     }
405   else
406     {
407       sd     = fmass->sense_data;
408       sdinfo = fmass->sense_data_info;
409       valid  = fmass->info_valid << 7;
410       fmass->sense_data      = SCSI_NO_SENSE;
411       fmass->sense_data_info = 0;
412       fmass->info_valid      = 0;
413     }
414 
415   buf[0] = valid | 0x70; /* Valid, current error */
416   buf[2] = SK(sd);
417   put_unaligned_be32((&buf[3]), sdinfo); /* Sense information */
418   buf[7]  = 10;  /* Additional sense length: 18 - 8 */
419   buf[12] = ASC(sd);
420   buf[13] = ASCQ(sd);
421   return 18;
422 }
423 
424 int done_scsi_mode_select(struct mass_dev_s *fmass)
425 {
426   (void)fmass;
427   return 0;
428 }
429 
430 int done_scsi_mode_sense(struct mass_dev_s *fmass)
431 {
432   uint8_t *buf = (uint8_t *)fmass->bulkreq.buf;
433   int len;
434 
435   buf[0] = 0x03;  /* mode data length */
436   buf[1] = 0x00;
437   buf[2] = 0x00;
438   buf[3] = 0x00;
439   len    = 4;
440   fmass->residue = len;
441   return len;
442 }
443 
444 int done_scsi_read_format_capacities(struct mass_dev_s *fmass)
445 {
446   uint8_t *buf = (uint8_t *)fmass->bulkreq.buf;
447 
448   buf[0] = 0;
449   buf[1] = 0;
450   buf[2] = 0;
451   buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
452   buf   += 4;
453 
454   put_unaligned_be32((&buf[0]), fmass->caps[fmass->lun].nsectors);
455   put_unaligned_be32((&buf[4]), fmass->caps[fmass->lun].sectorsize);    /* Block length */
456   buf[4] = 0x02; /* Current capacity */
457 
458   return 12;
459 }
460 
461 int done_scsi_read_capacity(struct mass_dev_s *fmass)
462 {
463   uint8_t *buf = (uint8_t *)fmass->bulkreq.buf;
464   uint32_t sectors;
465   uint32_t blksizes;
466 
467   if (!fmass->nluns)
468     {
469       (void)fmass_dev_open(fmass);
470       fmass_dev_capacity(fmass);
471       fmass->sense_data = SCSI_NOT_READY_TO_READY_TRANSITION;
472       fmass->csw.bCSWStatus = BULK_CSW_STAT_FAIL;
473       DPRINTFN(0, "fmass->fileNode does not exist\n");
474       return 0;
475     }
476 
477   sectors  = fmass->caps[fmass->lun].nsectors - 1;
478   blksizes = fmass->caps[fmass->lun].sectorsize;
479 
480   put_unaligned_be32(buf, sectors);
481   put_unaligned_be32((buf + 4), blksizes);
482 
483   return 8;
484 }
485 
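/*
 * READ(6)/READ(10)/READ(12) handler. Sectors are read from the backing
 * partition in chunks of at most MAX_BLOCK_RW_SECTORS into the ring of
 * data buffers, and each filled buffer is queued on the bulk IN endpoint
 * while the next chunk is being read, overlapping disk I/O with the USB
 * transfer. The negative return value tells fmass_do_scsi_command() that
 * the data phase is handled here rather than through the common reply path.
 */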
486 int done_scsi_read(struct mass_dev_s *fmass)
487 {
488   uint8_t *buf = (uint8_t *)fmass->bulkreq.buf;
489   uint32_t lba;
490   uint32_t amount_left;
491   uint32_t amount;
492   uint32_t nreadsector;
493   uintptr_t num_sectors = fmass->caps[fmass->lun].nsectors;
494   struct fmass_data_buf_t *db;
495   uint32_t doread ;
496 
497   if (fmass->cmd[0] == SCSI_READ_6)
498     {
499       lba = ((((uint32_t)fmass->cmd[1]) << 16) |
500              get_unaligned_be16(&fmass->cmd[2]));
501     }
502   else
503     {
504       lba = get_unaligned_be32(&fmass->cmd[2]);
505 
506       /*
507        * We allow DPO (Disable Page Out = don't save data in the
508        * cache) and FUA (Force Unit Access = write directly to the
509        * medium).  We don't implement DPO; we implement FUA by
510        * performing synchronous output.
511        */
512 
513       if ((fmass->cmd[1] & ~0x18) != 0)
514         {
515           report_scsi_command_error(fmass, SCSI_INVALID_FIELD_IN_CDB);
516           return -EIO;
517         }
518     }
519 
520   if (lba >= num_sectors)
521     {
522       report_scsi_command_error(fmass, SCSI_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
523       return -1;
524     }
525 
526   if (unlikely(fmass->data_size_from_cmd == 0))
527     {
528       report_scsi_command_error(fmass, SCSI_UNRECOVERED_READ_ERROR);
529       return -EIO;
530     }
531 
532   nreadsector = fmass->data_size_from_cmd;
533 
534   amount_left = nreadsector;
535 
536   fmass->bulkreq.is_complete = 1;
537 
538   db = fmass->databuf_fill;
539 
540   for (; ;)
541     {
542       uint32_t data_size;
543 
544       amount = min(MAX_BLOCK_RW_SECTORS, amount_left);
545       db = fmass->databuf_fill;
546       if (!fmass_wait_intr(fmass, ((fmass->bulkreq.is_complete != 0) ||
547           taskstate_match(fmass, FMASS_TASK_DISCONNECT)), (2 * HZ)))
548         {
549           fmass->bulkreq.is_complete = 1;
550           report_scsi_command_error(fmass, SCSI_UNRECOVERED_READ_ERROR);
551           return -EIO;
552         }
553 
554       if (taskstate_match(fmass, FMASS_TASK_DISCONNECT))
555         {
556           report_scsi_command_error(fmass, SCSI_UNRECOVERED_READ_ERROR);
557           return -EIO;
558         }
559 
560       buf = (uint8_t *)db->buf;
561       doread = fmass_dev_read(fmass, buf, lba, amount);
562       data_size = doread << 9;
563       lba += doread;
564       amount_left -= doread;
565       fmass->residue -= data_size;    /* doread * 512 */
566       fmass->bulkreq.len = data_size;
567       fmass->bulkreq.buf = buf;
568       DPRINTFN(1, "read: db= 0x%x buf= 0x%x lba= %x doread= %u  nread= %u  amount_left = %u   residue= %u\n",
569                db, buf, lba - doread , doread, amount, amount_left, fmass->residue);
570 
571       if (doread < amount)
572         {
573           report_scsi_command_error(fmass, SCSI_UNRECOVERED_READ_ERROR);
574           break;
575         }
576 
577       if (amount_left == 0)
578         {
579           break;
580         }
581 
582       (void)fmass_bulkin_request(fmass, &fmass->bulkreq);
583       fmass->databuf_fill = db->next;
584     }
585   fmass->bulkreq.is_complete = 1;
586   return -EIO;
587 }
588 
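/*
 * WRITE(6)/WRITE(10)/WRITE(12) handler. The OUT data phase is started first
 * and the worker thread blocks until the host has transferred the data; the
 * received sectors are then written to the backing partition in chunks of
 * MAX_BLOCK_RW_SECTORS, and any shortfall is reported through the sense
 * data and a failed CSW status.
 */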
589 int done_scsi_write(struct mass_dev_s *fmass)
590 {
591   uint8_t *buf = (uint8_t *)fmass->bulkreq.buf;
592   uint32_t lba;
593   uint32_t idx;
594   uint32_t amount, amount_left;
595   uint32_t nwritesector;
596   uintptr_t num_sectors = fmass->caps[fmass->lun].nsectors;
597   uint32_t total_do_write;
598   uint32_t do_write = 0;
599 
600   if (fmass->caps[fmass->lun].read_only)
601     {
602       report_scsi_command_error(fmass,SCSI_WRITE_PROTECTED);
603       return -EINVAL;
604     }
605 
606   if (fmass->cmd[0] == SCSI_WRITE_6)
607     {
608       lba = ((((uint32_t)fmass->cmd[1]) << 16) |
609              get_unaligned_be16(&fmass->cmd[2]));
610     }
611   else
612     {
613       lba = get_unaligned_be32(&fmass->cmd[2]);
614 
615       /*
616        * We allow DPO (Disable Page Out = don't save data in the
617        * cache) and FUA (Force Unit Access = write directly to the
618        * medium).  We don't implement DPO; we implement FUA by
619        * performing synchronous output.
620        */
621 
622       if ((fmass->cmd[1] & ~0x18) != 0)
623         {
624           report_scsi_command_error(fmass, SCSI_INVALID_FIELD_IN_CDB);
625           return -EIO;
626         }
627       if (fmass->cmd[1] & 0x08)
628         {
629           /* FUA */
630           /* XXX set SYNC flag here */
631 
632         }
633     }
634 
635   if (lba >= num_sectors)
636     {
637       report_scsi_command_error(fmass, SCSI_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
638       return -1;
639     }
640 
641   if (unlikely(fmass->data_size_from_cmd == 0))
642     {
643       return -EIO; /* No default reply */
644     }
645 
646   fmass_task_change_state(fmass, FMASS_TASK_REQ_DATA_PHASE);
647   DPRINTFN(1, "------ Direct Data Phase ------\n");
648   fmass->bulkreq.len = fmass->data_size;
649   (void)fmass_bulkout_request(fmass, &fmass->bulkreq);
650 
651   if (!fmass_wait_intr(fmass, (taskstate_match(fmass, FMASS_TASK_DONE_DATA_PHASE) ||
652       taskstate_match(fmass, FMASS_TASK_DISCONNECT)), (5*HZ)))
653     {
654       DPRINTFN(0, "error wait intr\n");
655       return -EIO;
656     }
657 
658   if (taskstate_unmatch(fmass, FMASS_TASK_DONE_DATA_PHASE))
659     {
660       DPRINTFN(0, "invalid task state: %u, Data packet transfer not finished\n", fmass->task_state);
661       return -EIO;
662     }
663 
664   nwritesector = fmass->bulkreq.xfrd >> 9;  /* bytes -> sectors */
665   nwritesector = min(nwritesector, fmass->data_size_from_cmd);
666 
667   amount = nwritesector / MAX_BLOCK_RW_SECTORS;
668   amount_left = nwritesector % MAX_BLOCK_RW_SECTORS;
669 
670   total_do_write = 0;
671   for (idx = 0; idx < amount ; idx++)
672     {
673       do_write = fmass_dev_write(fmass, buf, lba, MAX_BLOCK_RW_SECTORS);
674 
675       lba += MAX_BLOCK_RW_SECTORS;
676       buf += (MAX_BLOCK_RW_SECTORS << 9);
677       fmass->residue -= do_write << 9; /* do_write * 512 */
678       total_do_write += do_write;
679     }
680 
681   if (amount == 0 || do_write > 0)
682     {
683       do_write = fmass_dev_write(fmass, buf, lba, amount_left);
684 
685       if (do_write > 0)
686         {
687           fmass->residue -= do_write << 9; /* do_write * 512 */
688           total_do_write += do_write;
689         }
690     }
691 
692   if (total_do_write < nwritesector)
693     {
694       fmass->sense_data = SCSI_UNRECOVERED_READ_ERROR;
695       fmass->csw.bCSWStatus = BULK_CSW_STAT_FAIL;
696     }
697 
698   fmass->data_size = 0;
699   return -EIO;
700 }
701 
702 int done_scsi_inquiry(struct mass_dev_s *fmass)
703 {
704   errno_t ret;
705   uint8_t *buf = (uint8_t *)fmass->bulkreq.buf;
706   buf[0] = 0;        /* Peripheral Qualifier && Device Type */
707   buf[1] = 0x80;     /* Removable Medium */
708   buf[2] = 0;        /* ANSI SCSI level 2 */
709   buf[3] = 1;        /* SCSI-2 INQUIRY data format */
710   buf[4] = 31;       /* Additional length */
711   buf[5] = 0;        /* SCCS SCC Support: 0 */
712   buf[6] = 0;
713   buf[7] = 0;
714 
715   ret = memcpy_s(buf + 8, ((MAX_BLOCK_RW_SECTORS << 9) - 8), fmass->inquiry_str, sizeof(fmass->inquiry_str));
716   if (ret != EOK)
717     {
718       return -EIO;
719     }
720 
721   fmass->bulkreq.len = 36;
722   fmass->residue     = 0;
723   return -EIO;
724 }
725 
726 int done_scsi_allow_medium_removal(struct mass_dev_s *fmass)
727 {
728   fmass->sense_data = SCSI_INVALID_COM;
729   fmass->csw.bCSWStatus = BULK_CSW_STAT_FAIL;
730   return 0;
731 }
732 
733 static int fmass_check_receive_cbw(struct mass_dev_s *fmass)
734 {
735   struct bulk_cbw *cbw = (struct bulk_cbw *)fmass->bulkreq.buf;
736   errno_t ret;
737 
738   DPRINTFN(1, "\t -----Received CBW packet -----dCBWTag: 0x%x  bCBWLUN:%u\n", cbw->dCBWTag, cbw->bCBWLUN);
739 
740   /* Check that the CBW is valid */
741 
742   if (fmass->bulkreq.xfrd != BULK_CBW_WRAP_LEN ||
743       cbw->dCBWSignature != BULK_CBW_SIGN)
744     {
745       DPRINTFN(0, "invalid CBW: len %u, sig 0x%x, cvt_sig 0x%x, lct_sig 0x%x\n",
746                fmass->bulkreq.xfrd, cbw->dCBWSignature, cbw->dCBWSignature, BULK_CBW_SIGN);
747       return -1;
748     }
749 
750   /* Check that the CBW is meaningful */
751 
752   if (cbw->bCBWLUN >= FMASS_MAX_LUNS || cbw->bmCBWFlags & ~BULK_CBW_FLAG_IN ||
753       cbw->bCBWLEN == 0 || cbw->bCBWLEN > MAX_COMMAND_SIZE)
754     {
755       DPRINTFN(0, "non-meaningful CBW: lun = %u, flags = 0x%x, cmdlen = %u\n",
756                cbw->bCBWLUN, cbw->bmCBWFlags, cbw->bCBWLEN);
757       return -1;
758     }
759 
760   /* Save the command for later */
761 
762   fmass->cmd_size = cbw->bCBWLEN;
763   ret = memcpy_s(fmass->cmd, MAX_COMMAND_SIZE, cbw->CDB, fmass->cmd_size);
764   if (ret != EOK)
765     {
766       DPRINTFN(0, "memcpy_s failed, %d\n", ret);
767       return -1;
768     }
769   if (cbw->bmCBWFlags & BULK_CBW_FLAG_IN)
770     {
771       fmass->data_dir = DATA_DIRECT_TO_HOST;
772     }
773   else
774     {
775       fmass->data_dir = DATA_DIRECT_FROM_HOST;
776     }
777   fmass->data_size = cbw->dCBWDataTransferLength;
778   if (fmass->data_size == 0)
779     {
780       fmass->data_dir = DATA_DIRECT_NONE;
781     }
782 
783   fmass->tag = cbw->dCBWTag;
784 
785   if (fmass->data_size < fmass->data_size_from_cmd)
786     {
787       fmass->data_size_from_cmd = fmass->data_size;
788     }
789   fmass->residue = fmass->data_size;
790 
791   return 0;
792 }
793 
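/*
 * Decode the CDB saved from the CBW and dispatch it to the done_scsi_*
 * handlers above. Each case first records data_size_from_cmd, the transfer
 * length implied by the SCSI command itself; a non-negative reply is the
 * number of bytes to place in the data phase for host-bound commands, and
 * the residue is reduced accordingly afterwards.
 */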
794 static void fmass_do_scsi_command(struct mass_dev_s *fmass)
795 {
796   int i;
797   int reply = -1;
798   uint32_t reply_temp;
799 
800   uint8_t cmd = fmass->cmd[0];
801 
802   switch (cmd)
803     {
804     case SCSI_TEST_UNIT_READY:
805       fmass->data_size_from_cmd = 0;
806       reply = done_scsi_test_unit_ready(fmass);
807       break;
808 
809     case SCSI_REQUEST_SENSE:
810       fmass->data_size_from_cmd = fmass->cmd[4];
811       reply = done_scsi_request_sense(fmass);
812       break;
813 
814     case SCSI_INQUIRY:
815       fmass->data_size_from_cmd = fmass->cmd[4];
816       reply = done_scsi_inquiry(fmass);
817       break;
818 
819     case SCSI_MODE_SELECT_6:
820       fmass->data_size_from_cmd = fmass->cmd[4];
821       reply = done_scsi_mode_select(fmass);
822       break;
823 
824     case SCSI_MODE_SELECT_10:
825       fmass->data_size_from_cmd = get_unaligned_be16(&fmass->cmd[7]);
826       reply = done_scsi_mode_select(fmass);
827       break;
828 
829     case SCSI_MODE_SENSE_6:
830       fmass->data_size_from_cmd = fmass->cmd[4];
831       reply = done_scsi_mode_sense(fmass);
832       break;
833 
834     case SCSI_MODE_SENSE_10:
835       fmass->data_size_from_cmd = get_unaligned_be16(&fmass->cmd[7]);
836       reply = done_scsi_mode_sense(fmass);
837       break;
838 
839     case SCSI_READ_FORMAT_CAPACITIES:
840       fmass->data_size_from_cmd = get_unaligned_be16(&fmass->cmd[7]);
841       reply = done_scsi_read_format_capacities(fmass);
842       break;
843 
844     case SCSI_READ_CAPACITY:
845       fmass->data_size_from_cmd = 8;
846       if (fmass_device_status(fmass))
847         {
848           reply = done_scsi_read_capacity(fmass);
849         }
850       else
851         {
852           reply = 0;
853         }
854       break;
855 
856     case SCSI_READ_6:
857       i = fmass->cmd[4];
858       fmass->data_size_from_cmd = (i == 0) ? 256 : i;
859       reply = done_scsi_read(fmass);
860       break;
861 
862     case SCSI_READ_10:
863       fmass->data_size_from_cmd = get_unaligned_be16(&fmass->cmd[7]);
864       reply = done_scsi_read(fmass);
865       break;
866 
867     case SCSI_READ_12:
868       fmass->data_size_from_cmd = get_unaligned_be32(&fmass->cmd[6]);
869       reply = done_scsi_read(fmass);
870       break;
871 
872     case SCSI_WRITE_6:
873       i = fmass->cmd[4];
874       fmass->data_size_from_cmd = (i == 0) ? 256 : i;
875       reply = done_scsi_write(fmass);
876       break;
877 
878     case SCSI_WRITE_10:
879       fmass->data_size_from_cmd = get_unaligned_be16(&fmass->cmd[7]);
880       reply = done_scsi_write(fmass);
881       break;
882 
883     case SCSI_WRITE_12:
884       fmass->data_size_from_cmd = get_unaligned_be32(&fmass->cmd[6]);
885       reply = done_scsi_write(fmass);
886       break;
887 
888     case SCSI_ALLOW_MEDIUM_REMOVAL:
889       fmass->data_size_from_cmd = 0;
890       reply = done_scsi_allow_medium_removal(fmass);
891       break;
892 
893     case START_STOP:
894       fmass->data_size_from_cmd = 0;
895       reply = 0;
896       fmass->dev_status = DEV_ST_DISCONNECT;
897       (void)fmass_dev_close(fmass);
898       break;
899 
900     default:
901       DPRINTFN(0, "********[invalid SCSI command! %x]***********\n", cmd);
902       fmass->data_size_from_cmd = 0;
903       report_scsi_command_error(fmass, SCSI_INVALID_COMMAND);
904       break;
905     }
906 
907   if (reply >= 0 && fmass->data_dir == DATA_DIRECT_TO_HOST)
908     {
909       fmass->bulkreq.len = reply;
910       reply_temp = min((uint32_t)reply, fmass->data_size_from_cmd);
911       fmass->residue -= reply_temp;
912     }
913  }
914 
915 void fmass_handle_scsi_data(struct mass_dev_s *fmass)
916 {
917   int is_data_phase = 0;
918 
919   DPRINTFN(1, "\t-----REQ DATA PHASE-----\n");
920   switch (fmass->data_dir)
921     {
922     case DATA_DIRECT_NONE:
923       break;    /* Nothing to send */
924 
925     case DATA_DIRECT_UNKNOWN:
926       break;
927 
928     /* All but the last buffer of data must have already been sent */
929 
930     case DATA_DIRECT_TO_HOST:
931       if (fmass->data_size == 0)
932         {
933           /* do nothing */
934 
935         }
936       else if (fmass->residue == 0)
937         {
938           (void)fmass_bulkin_request(fmass, &fmass->bulkreq);
939         }
940       else
941         {
942           (void)fmass_bulkin_request(fmass, &fmass->bulkreq);
943           if (!fmass_wait_intr(fmass, taskstate_unmatch(fmass, FMASS_TASK_REQ_DATA_PHASE), (2*HZ)))
944             {
945               DPRINTFN(0, "--- [Data Phase] timeout! state: %u  residue : %u ---\n",
946                        fmass->task_state, fmass->residue);
947               if (taskstate_match(fmass, FMASS_TASK_REQ_DATA_PHASE))
948                 {
949                   fmass_task_change_state(fmass, FMASS_TASK_IDLE);
950                 }
951               return ;
952             }
953 
954           (void)EP_RESUME(fmass->bulkin);
955           fmass_task_change_state(fmass, FMASS_TASK_REQ_STATUS_PHASE);
956           return ;
957         }
958       is_data_phase = 1;
959       break;
960 
961     case DATA_DIRECT_FROM_HOST:
962       break;
963     }
964 
965   if (is_data_phase)
966     {
967       /* wait for the data transfer done */
968 
969       if (!fmass_wait_intr(fmass, taskstate_unmatch(fmass, FMASS_TASK_REQ_DATA_PHASE), (5*HZ)))
970         {
971           DPRINTFN(0, "--- [Data Phase] timeout! state: %u ---\n", fmass->task_state);
972           if (taskstate_match(fmass, FMASS_TASK_REQ_DATA_PHASE))
973             {
974               fmass_task_change_state(fmass, FMASS_TASK_IDLE);
975             }
976           return ;
977         }
978       if (taskstate_unmatch(fmass, FMASS_TASK_DONE_DATA_PHASE))
979         {
980           DPRINTFN(0, "--- [Data Phase] unmatch! unexpect state: %u ---\n", fmass->task_state);
981           return ;
982         }
983       fmass_task_change_state(fmass, FMASS_TASK_REQ_STATUS_PHASE);
984     }
985   else
986     {
987       fmass_task_change_state(fmass, FMASS_TASK_REQ_STATUS_PHASE);
988     }
989 }
990 
991 void fmass_set_scsi_status(struct mass_dev_s *fmass)
992 {
993   struct bulk_csw *csw = (struct bulk_csw *)(fmass->bulkreq.buf);
994   uint8_t status       = fmass->csw.bCSWStatus;
995 
996   DPRINTFN(1, "\t -----Response CSW packet -------dCBWTag:0x%x   residue: %u   status: %u\n",
997            fmass->tag, fmass->residue, status);
998 
999   /* Store and send the Bulk-only CSW */
1000 
1001   csw->dCSWSignature = BULK_CSW_SIGN;
1002   csw->dCSWTag       = fmass->tag;
1003   csw->dCSWResidue   = fmass->residue;
1004   csw->bCSWStatus    = status;
1005 
1006   /* submit csw packet to controller */
1007 
1008   fmass->bulkreq.len = BULK_CSW_WRAP_LEN;
1009   (void)fmass_bulkin_request(fmass, &fmass->bulkreq);
1010 
1011   /* wait csw packet transfer finish */
1012 
1013   if (!fmass_wait_intr(fmass, taskstate_unmatch(fmass, FMASS_TASK_REQ_STATUS_PHASE), (5 * HZ)))
1014     {
1015       DPRINTFN(0, "--- [CSW Phase] timeout! state: %u ---\n", fmass->task_state);
1016       if (taskstate_match(fmass, FMASS_TASK_REQ_STATUS_PHASE))
1017         {
1018           fmass_task_change_state(fmass, FMASS_TASK_IDLE);
1019         }
1020       return ;
1021     }
1022 
1023   if (taskstate_unmatch(fmass, FMASS_TASK_DONE_STATUS_PHASE))
1024     {
1025       DPRINTFN(0, "--- [CSW Phase] unmatch! unexpect state: %u ---\n", fmass->task_state);
1026       return ;
1027     }
1028 
1029   DPRINTFN(1, "\t -----CSW Transfer Finish -----\n");
1030   fmass_task_change_state(fmass, FMASS_TASK_REQ_COMMAND_PHASE);
1031   fmass->csw.bCSWStatus = BULK_CSW_STAT_OK;
1032 }
1033 
1034 static void fmass_parts_registered(struct mass_dev_s *fmass, const char *dev_path)
1035 {
1036 #define NODE_NAME_LEN   0x10
1037   char node_name[NODE_NAME_LEN];
1038   struct inode *inode;
1039   los_part *part;
1040   int i;
1041 
1042   for (i = 0; i < MAX_FILE_STORAGE_LUNS; i++)
1043     {
1044       inode = NULL;
1045       (void)snprintf_s(node_name, sizeof(node_name), sizeof(node_name) - 1, "%s%0d", dev_path, i);
1046       (void)open_blockdriver(node_name, O_RDWR, &inode);
1047       if (inode == NULL)
1048         {
1049           return;
1050         }
1051       part = los_part_find(inode);
1052       if (part == NULL)
1053         {
1054           (void)close_blockdriver(inode);
1055           continue;
1056         }
1057 
1058       if (fmass->nluns == MAX_FILE_STORAGE_LUNS)
1059         {
1060           break;
1061         }
1062 
1063       fmass->fileNode[fmass->nluns] = inode;
1064       fmass->parts[fmass->nluns]    = part;
1065       fmass->nluns++;
1066     }
1067 
1068   return;
1069 }
1070 
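/*
 * Open the backing storage: scan the block-device name prefixes listed in
 * g_mass_device_type, map each partition found to a LUN (up to
 * MAX_FILE_STORAGE_LUNS), and allocate the 64-byte aligned transfer
 * buffers, linked into a ring, that back the bulk requests.
 */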
1071 static int fmass_dev_open(struct mass_dev_s *fmass)
1072 {
1073   uint32_t i;
1074 
1075   for (i = 0; i < sizeof(g_mass_device_type) / sizeof(g_mass_device_type[0]); i++)
1076     {
1077       fmass_parts_registered(fmass, g_mass_device_type[i]);
1078     }
1079 
1080   if (fmass->nluns == 0)
1081     {
1082       usb_err("device inode is unavailable!\n");
1083       return -1;
1084     }
1085 
1086   for (i = 0 ; i < MAX_DATA_BUFFER_NUM; i++)
1087     {
1088       struct fmass_data_buf_t *db = &fmass->databuf[i];
1089 
1090       if (db->buf != NULL)
1091         {
1092           continue;
1093         }
1094 
1095       db->buf = memalign(64, SKB_DATA_ALIGN(MAX_BLOCK_RW_SECTORS << 9));
1096       if (db->buf == NULL)
1097         {
1098             return -1;
1099         }
1100       db->state = DBUF_STATE_EMPTY;
1101       db->next  = db + 1;
1102     }
1103   fmass->databuf[i - 1].next = &fmass->databuf[0];
1104 
1105   fmass->dev_status = DEV_ST_CONNECTTED;
1106   fmass_notify_report(fmass, fmass->dev_status);
1107 
1108   return 0;
1109 }
1110 
1111 static int fmass_dev_close(struct mass_dev_s *fmass)
1112 {
1113   uint32_t i;
1114   FAR struct inode *inode;
1115 
1116   if (fmass->nluns == 0)
1117     {
1118       return -1;
1119     }
1120 
1121   for (i = 0 ; i < fmass->nluns ; i++)
1122     {
1123       inode = fmass->fileNode[i];
1124       (void)close_blockdriver(inode);
1125       fmass->fileNode[i] = NULL;
1126       fmass->parts[i]    = NULL;
1127     }
1128 
1129   for (i = 0 ; i < MAX_DATA_BUFFER_NUM; i++)
1130     {
1131       free(fmass->databuf[i].buf);
1132       fmass->databuf[i].buf = NULL;
1133     }
1134   fmass->nluns = 0;
1135   return 0;
1136 }
1137 
1138 static void fmass_dev_capacity(struct mass_dev_s *fmass)
1139 {
1140   uint32_t i;
1141   los_part *part;
1142   struct fmass_capacity *cap;
1143 
1144   for (i = 0; i < fmass->nluns; i++)
1145     {
1146       part = fmass->parts[i];
1147       cap  = &fmass->caps[i];
1148       cap->sectorsize = 0;
1149       cap->nsectors   = 0;
1150 
1151       (void)los_part_ioctl(part->part_id, GET_SECTOR_COUNT, &cap->nsectors);
1152       cap->nsectors += part->sector_start;
1153       (void)los_part_ioctl(part->part_id, GET_SECTOR_SIZE, &cap->sectorsize);
1154       PRINTK("*** %d  %d Bytes/Sector, Total %d Sectors ***\n", i, (int)cap->sectorsize, (int)cap->nsectors);
1155     }
1156 }
1157 
1158 static size_t fmass_dev_read(struct mass_dev_s *fmass,
1159                              uint8_t *buffer,
1160                              size_t stsector,
1161                              uint32_t nsectors)
1162 {
1163   size_t rsectors = 0;
1164   los_part *part  = fmass->parts[fmass->lun];
1165 
1166   if (part != NULL)
1167     {
1168       int ret = los_part_read(part->part_id, buffer, stsector, nsectors);
1169       if (ret == 0)
1170         {
1171           rsectors = nsectors;
1172         }
1173     }
1174   return rsectors;
1175 }
1176 
1177 static size_t fmass_dev_write(struct mass_dev_s *fmass,
1178                               uint8_t *buffer,
1179                               size_t stsector,
1180                               uint32_t nsectors)
1181 {
1182   size_t wsectors = 0;
1183   los_part *part  = fmass->parts[fmass->lun];
1184 
1185   if (part != NULL)
1186     {
1187       int ret = los_part_write(part->part_id, buffer, stsector, nsectors);
1188       if (ret == 0)
1189         {
1190           wsectors = nsectors;
1191         }
1192     }
1193   return wsectors;
1194 }
1195 
1196 void fmass_set_config(struct mass_dev_s *fmass, struct usbdev_s *dev)
1197 {
1198   struct usbdev_ep_s *ep0;
1199   struct usbdev_req_s *req;
1200 
1201   if (dev->ep0 == NULL || dev->ep0->handle_req == NULL)
1202     {
1203       usb_err("set config fail!\n");
1204       return;
1205     }
1206   ep0 = dev->ep0;
1207   req = ep0->handle_req;
1208 
1209   if (fmass->nluns == 0)
1210     {
1211       if (fmass_dev_open(fmass) == 0)
1212         {
1213           fmass_dev_capacity(fmass);
1214         }
1215       else
1216         {
1217           usb_err("cannot find inode!\n");
1218           return;
1219         }
1220     }
1221 
1222   if (fmass->bulk_in_enabled)
1223     {
1224       fmass->bulk_in_enabled = 0;
1225       (void)EP_DISABLE(fmass->bulkin);
1226     }
1227 
1228   if (fmass->bulk_out_enabled)
1229     {
1230       fmass->bulk_out_enabled = 0;
1231       (void)EP_DISABLE(fmass->bulkout);
1232     }
1233 
1234   usbd_configep_byspeed(dev, &g_fmass_confd.iepd);
1235   (void)EP_CONFIGURE(fmass->bulkin, (const usb_endpoint_descriptor_t *)&g_fmass_confd.iepd, 0);
1236   fmass->bulk_in_enabled = 1;
1237 
1238   usbd_configep_byspeed(dev, &g_fmass_confd.oepd);
1239   (void)EP_CONFIGURE(fmass->bulkout, (const usb_endpoint_descriptor_t *)&g_fmass_confd.oepd, 0);
1240   fmass->bulk_out_enabled = 1;
1241 
1242   fmass->databuf_fill        = &fmass->databuf[0];
1243   fmass->bulkreq.buf         = (uint8_t *)fmass->databuf[0].buf;
1244   fmass->bulkreq.is_complete = 1;
1245 
1246   req->len = 0;
1247   (void)EP_SUBMIT(ep0, req);
1248 
1249   fmass_task_change_state(fmass, FMASS_TASK_REQ_COMMAND_PHASE);
1250 }
1251 
1252 void fmass_set_next_command(struct mass_dev_s *fmass)
1253 {
1254   fmass_task_state next_state;
1255 
1256   DPRINTFN(1, "\n\t -----REQUEST CBW packet -------\n");
1257 
1258   fmass->bulkreq.len = BULK_CBW_WRAP_LEN;
1259 
1260   /* start a request to receive a CBW packet */
1261 
1262   (void)fmass_bulkout_request(fmass, &fmass->bulkreq);
1263 
1264   /* wait for the CBW packet */
1265 
1266   if (!fmass_wait_intr(fmass, taskstate_unmatch(fmass, FMASS_TASK_REQ_COMMAND_PHASE), osWaitForever))
1267     {
1268       DPRINTFN(0, "--- [CBW Phase] error! state: %u ---\n", fmass->task_state);
1269       if (taskstate_match(fmass, FMASS_TASK_REQ_COMMAND_PHASE))
1270         {
1271           fmass_task_change_state(fmass, FMASS_TASK_IDLE);
1272         }
1273       return;
1274     }
1275 
1276   if (taskstate_unmatch(fmass, FMASS_TASK_DONE_COMMAND_PHASE))
1277     {
1278       DPRINTFN(0, "--- [CBW Phase] unmatch! unexpect state: %u ---\n", fmass->task_state);
1279       return ;
1280     }
1281 
1282   /* check and receive CBW packet */
1283 
1284   if (fmass_check_receive_cbw(fmass) < 0)
1285     {
1286       return ;
1287     }
1288 
1289   /* handle CBW packet */
1290 
1291   fmass_do_scsi_command(fmass);
1292   if (taskstate_unmatch(fmass, FMASS_TASK_DISCONNECT))
1293     {
1294       if (fmass->data_size)
1295         {
1296           next_state = FMASS_TASK_REQ_DATA_PHASE;
1297         }
1298       else
1299         {
1300           next_state = FMASS_TASK_REQ_STATUS_PHASE;
1301         }
1302       fmass_task_change_state(fmass, next_state);
1303     }
1304 }
1305 
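/*
 * Worker thread implementing the Bulk-Only Transport state machine.
 * It sleeps on task_event until the setup/callback paths signal new work,
 * then runs the current phase: apply a configuration change, receive the
 * next CBW, move the data, or return the CSW. The DISCONNECT flag short
 * circuits the cycle and reports the status change to registered listeners.
 */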
1306 static void *fmass_main_thread(UINTPTR para, UINTPTR para1)
1307 {
1308   struct mass_dev_s *fmass = (struct mass_dev_s *)para;
1309   struct usbdev_s *dev     = (struct usbdev_s *)para1;
1310   uint32_t oldstate        = FMASS_TASK_IDLE;
1311   uint32_t curstate        = fmass->task_state;
1312   PEVENT_CB_S fmass_event  = &fmass->task_event;
1313   uint32_t ret;
1314 
1315   while (1)
1316     {
1317       if (oldstate == curstate)
1318         {
1319           DPRINTFN(1, "\n -- process thread wait --(curstate:%u)\n", fmass->task_state);
1320           ret = LOS_EventRead(fmass_event, (FMASS_DATA_PROC | FMASS_NEED_EXIT),
1321                               LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);
1322           if (ret & FMASS_NEED_EXIT)
1323             {
1324               break;
1325             }
1326           curstate = fmass->task_state;
1327         }
1328       oldstate = curstate;
1329 
1330       if (taskstate_match(fmass, FMASS_TASK_DISCONNECT))
1331         {
1332           fmass_report_usb_status(fmass);
1333           fmass->task_state &= ~FMASS_TASK_DISCONNECT;
1334           ret = LOS_EventPoll(&(fmass_event->uwEventID), FMASS_NEED_EXIT,
1335                               LOS_WAITMODE_OR | LOS_WAITMODE_CLR);
1336           if (ret == FMASS_NEED_EXIT)
1337             {
1338               break;
1339             }
1340         }
1341       else
1342         {
1343           switch (curstate)
1344             {
1345             case FMASS_TASK_CONFIG_CHANGE:
1346               fmass_set_config(fmass, dev);
1347               break;
1348 
1349             case FMASS_TASK_REQ_COMMAND_PHASE:
1350               fmass_set_next_command(fmass);
1351               break;
1352 
1353             case FMASS_TASK_REQ_DATA_PHASE:
1354               fmass_handle_scsi_data(fmass);
1355               break;
1356 
1357             case FMASS_TASK_REQ_STATUS_PHASE:
1358               fmass_set_scsi_status(fmass);
1359               break;
1360 
1361             default:
1362               break;
1363             }
1364         }
1365 
1366       curstate = fmass->task_state;
1367     }
1368 
1369   (void)LOS_EventWrite(&(fmass->task_event), FMASS_THREAD_EXITED);
1370 
1371   return NULL;
1372 }
1373 
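/*
 * Phase transitions are made under the spinlock so the worker thread and
 * the USB completion callbacks cannot race: the previous phase bit is
 * cleared (unless a disconnect is pending) and the new one is set.
 */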
1374 static void fmass_task_change_state(struct mass_dev_s *fmass, fmass_task_state new_state)
1375 {
1376   uint32_t flags;
1377   uint32_t old_state = fmass->task_state;
1378 
1379   spin_lock_irqsave(&fmass->lock, flags);
1380 
1381   if (taskstate_unmatch(fmass, FMASS_TASK_DISCONNECT))
1382     {
1383       fmass->task_state &= ~old_state;
1384     }
1385   fmass->task_state |= new_state;
1386 
1387   spin_unlock_irqrestore(&fmass->lock, flags);
1388 }
1389 
1390 static void fmass_main_thread_signal(struct mass_dev_s *fmass, fmass_task_state state)
1391 {
1392   DPRINTFN(1, " - process thread signal -(curstate:%u   newstate: %d)\n", fmass->task_state, state);
1393   fmass_task_change_state(fmass, state);
1394   (void)LOS_EventWrite(&(fmass->task_event), FMASS_DATA_PROC);
1395 }
1396 
1397 static uint32_t usb_task_creat(uint32_t *taskid, TSK_ENTRY_FUNC func, uint16_t prio,
1398                                const char *nm, UINTPTR para, UINTPTR para1)
1399 {
1400   uint32_t ret;
1401   TSK_INIT_PARAM_S attr;
1402 
1403   (void)memset_s(&attr, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
1404 
1405   attr.pfnTaskEntry = func;
1406   attr.uwStackSize  = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
1407   attr.auwArgs[0]   = (UINTPTR)para;
1408   attr.auwArgs[1]   = (UINTPTR)para1;
1409   attr.usTaskPrio   = prio;
1410   attr.pcName       = (char *)nm;
1411   attr.uwResved     = LOS_TASK_STATUS_DETACHED;
1412 
1413   ret = LOS_TaskCreate(taskid, &attr);
1414   if (ret != LOS_OK)
1415     {
1416       usb_err("create %s task error!\n", nm);
1417     }
1418   return ret;
1419 }
1420 
1421 static int fmass_thread_init(struct mass_dev_s *fmass, struct usbdev_s *dev)
1422 {
1423   uint32_t td = 0;
1424   uint32_t ret;
1425 
1426   (void)LOS_EventInit(&fmass->task_event);
1427   mtx_init(&fmass->task_mtx, "tmtx", NULL, 0);
1428   init_waitqueue_head(&fmass->xfer_wait);
1429 
1430   fmass_task_change_state(fmass, FMASS_TASK_IDLE);
1431 
1432   ret = usb_task_creat(&td, (TSK_ENTRY_FUNC)fmass_main_thread, 10, "USB_FMASS_Task", (UINTPTR)fmass, (UINTPTR)dev);
1433   return (ret == 0) ? (0) : (-1);
1434 }
1435 
1436 static int fmass_thread_delete(struct mass_dev_s *fmass)
1437 {
1438   uint32_t ret;
1439 
1440   (void)LOS_EventWrite(&(fmass->task_event), FMASS_NEED_EXIT);
1441   ret = LOS_EventRead(&fmass->task_event, FMASS_THREAD_EXITED, LOS_WAITMODE_OR | LOS_WAITMODE_CLR, (10 * HZ));
1442   if (ret == LOS_ERRNO_EVENT_READ_TIMEOUT)
1443     {
1444       usb_err("fmass, wait thread exit timeout\n");
1445       return -1;
1446     }
1447 
1448   (void)LOS_EventDestroy(&fmass->task_event);
1449   mtx_destroy(&fmass->task_mtx);
1450 
1451   return 0;
1452 }
1453 
1454 static void fmass_notify_report(struct mass_dev_s *fmass, int status)
1455 {
1456   int i ;
1457   struct fmass_notify *notify;
1458 
1459   DPRINTFN(1, "\n< fmass storage %s >\n", (status == DEV_ST_CONNECTTED) ? "connected" : "disconnected");
1460   for (i = 0; i < MAX_NOFIFY_NUM ; i++)
1461     {
1462       notify = &fmass->notify[i];
1463       if (notify->is_used)
1464         {
1465           if (notify->notifycb != NULL)
1466             {
1467               notify->notifycb(notify->notifydata, status);
1468             }
1469         }
1470     }
1471 }
1472 
1473 static void fmass_report_usb_status(struct mass_dev_s *fmass)
1474 {
1475   if (fmass->dev_status == DEV_ST_DISCONNECT)
1476     {
1477       if (fmass->nluns == 0) /* device is not ready, skip disconnect status */
1478         {
1479           fmass_task_change_state(fmass, FMASS_TASK_IDLE);
1480           return;
1481         }
1482       (void)fmass_dev_close(fmass);
1483     }
1484 
1485   fmass_notify_report(fmass, fmass->dev_status);
1486   fmass_task_change_state(fmass, FMASS_TASK_IDLE);
1487 }
1488 
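/*
 * fmass_register_notify()/fmass_unregister_notify() let upper layers watch
 * for media connect/disconnect events. A minimal usage sketch; the callback
 * name and context pointer are purely illustrative and not part of this
 * driver:
 *
 *   static void storage_state_cb(void *context, int status)
 *   {
 *     // status follows the convention noted below: 0 disconnect, 1 connect
 *     PRINTK("mass storage %s\n", status ? "connected" : "disconnected");
 *   }
 *
 *   int handle = fmass_register_notify(storage_state_cb, NULL);
 *   ...
 *   (void)fmass_unregister_notify(handle);
 */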
1489 /* status 0: disconnect  1: connect */
1490 
1491 int fmass_register_notify(void(*notify)(void *context, int status), void *context)
1492 {
1493   int i;
1494   struct fmass_notify *f_notify = g_notify.notify;
1495   struct mtx *notify_mtx        = &g_notify.notify_mtx;
1496 
1497   fmass_notify_init();
1498 
1499   mtx_lock(notify_mtx);
1500   for (i = 0; i < MAX_NOFIFY_NUM ; i++)
1501     {
1502       if (f_notify[i].is_used == 0)
1503         {
1504           f_notify[i].is_used    = 1;
1505           f_notify[i].notifycb   = notify;
1506           f_notify[i].notifydata = context;
1507           break;
1508         }
1509     }
1510   mtx_unlock(notify_mtx);
1511   return (i < MAX_NOFIFY_NUM) ? (i) : (-1);
1512 }
1513 
1514 int fmass_unregister_notify(int handle)
1515 {
1516   struct fmass_notify *f_notify = g_notify.notify;
1517   struct mtx *notify_mtx        = &g_notify.notify_mtx;
1518 
1519   if (handle < 0 || handle >= MAX_NOFIFY_NUM)
1520     {
1521       return -1;
1522     }
1523 
1524   mtx_lock(notify_mtx);
1525   if (f_notify[handle].is_used == 0)
1526     {
1527       mtx_unlock(notify_mtx);
1528       return -1;
1529     }
1530 
1531   f_notify[handle].is_used    = 0;
1532   f_notify[handle].notifycb   = NULL;
1533   f_notify[handle].notifydata = NULL;
1534   mtx_unlock(notify_mtx);
1535 
1536   return 0;
1537 }
1538 
1539 int fmass_partition_startup(const char *path)
1540 {
1541   struct mass_softc *mass = g_fmass;
1542   struct mass_dev_s *fmass;
1543   FAR struct inode *inode = NULL;
1544   uint32_t i;
1545   int ret;
1546 
1547   if (mass == NULL)
1548     {
1549       return -1;
1550     }
1551   fmass = &mass->dev;
1552 
1553   ret = open_blockdriver(path, O_RDWR, &inode);
1554   if (inode == NULL || ret != ENOERR)
1555     {
1556       DPRINTFN(0, "open blockdriver %s fail  ret = %d\n", path, ret);
1557       return -1;
1558     }
1559 
1560   for (i = 0; i < fmass->nluns; i++)
1561     {
1562       if (inode == fmass->fileNode[i])
1563         {
1564           fmass->lun = i;
1565           break;
1566         }
1567     }
1568 
1569   if (i == fmass->nluns)
1570     {
1571       usb_err("The device path is invalid\n");
1572       return -1;
1573     }
1574 
1575   return 0;
1576 }
1577 
1578 void *fmass_bind(void)
1579 {
1580   return (void *)g_fmass;
1581 }
1582 
1583 static void fmass_source_free(void)
1584 {
1585   g_fmass = NULL;
1586 }
1587 
1588 static int usbclass_mass_bind(struct usbdevclass_driver_s *driver, struct usbdev_s *dev)
1589 {
1590   struct usbdev_ep_s *ep;
1591   struct mass_driver_s *mass_drvr;
1592   struct mass_dev_s *mass_dev;
1593 
1594   if (driver == NULL || dev == NULL)
1595     {
1596       return -1;
1597     }
1598 
1599   mass_drvr = (struct mass_driver_s *)driver;
1600   mass_dev  = mass_drvr->dev;
1601   if (mass_dev == NULL)
1602     {
1603       return -1;
1604     }
1605 
1606   (void)snprintf_s(mass_dev->inquiry_str, sizeof(mass_dev->inquiry_str), sizeof(mass_dev->inquiry_str) - 1,
1607                    "%-8s%-16s%-4s","Mass","Storage Device","1.00");
1608   mass_dev->notify = g_notify.notify;
1609   mass_dev->lun    = 0;
1610   mass_dev->nluns  = 0;
1611 
1612   ep = DEV_ALLOCEP(dev, g_fmass_confd.iepd.bEndpointAddress,
1613                    (struct usb_endpoint_descriptor *)&g_fmass_confd.iepd);
1614   if (ep == NULL)
1615     {
1616       PRINT_ERR("%s,%d\n", __FUNCTION__, __LINE__);
1617       return -1;
1618     }
1619   ep->priv = (void *)mass_dev;
1620   ep->handle_req = &mass_dev->bulkreq;
1621   mass_dev->bulkin = ep;
1622   DPRINTFN(1, "bulkin:%#x, %02x\n", ep, ep->eplog);
1623 
1624   ep = DEV_ALLOCEP(dev, g_fmass_confd.oepd.bEndpointAddress,
1625                    (struct usb_endpoint_descriptor *)&g_fmass_confd.oepd);
1626   if (ep == NULL)
1627     {
1628       PRINT_ERR("%s,%d\n", __FUNCTION__, __LINE__);
1629       return -1;
1630     }
1631   ep->priv = (void *)mass_dev;
1632   ep->handle_req = &mass_dev->bulkreq;
1633   mass_dev->bulkout = ep;
1634   DPRINTFN(1, "bulkout:%#x, %02x\n", ep, ep->eplog);
1635 
1636   if (fmass_thread_init(mass_dev, dev) < 0)
1637     {
1638       PRINT_ERR("fmass_thread_init failed\n");
1639       goto fail;
1640     }
1641 
1642   return 0;
1643 fail:
1644   usb_err("composite_fmass_bind failed\n");
1645   (void)usbclass_mass_unbind(driver, dev);
1646   return -1;
1647 }
1648 
1649 static int usbclass_mass_unbind(struct usbdevclass_driver_s *driver, struct usbdev_s *dev)
1650 {
1651   struct mass_driver_s *mass_drvr;
1652   struct mass_dev_s *mass_dev;
1653   int ret;
1654 
1655   if (driver == NULL || dev == NULL)
1656     {
1657       return -1;
1658     }
1659 
1660   mass_drvr = (struct mass_driver_s *)driver;
1661   mass_dev  = mass_drvr->dev;
1662   if (mass_dev == NULL)
1663     {
1664       return -1;
1665     }
1666 
1667   if (mass_dev->dev_status == DEV_ST_CONNECTTED)
1668     {
1669       ret = fmass_dev_close(mass_dev);
1670       if (ret < 0)
1671         {
1672           PRINT_ERR("%s fmass dev close fail\n", __FUNCTION__);
1673           return -1;
1674         }
1675     }
1676 
1677   if (driver->ops != NULL && driver->ops->disconnect != NULL)
1678     {
1679       driver->ops->disconnect(driver, dev);
1680     }
1681 
1682   DEV_FREEEP(dev, mass_dev->bulkin);
1683   DEV_FREEEP(dev, mass_dev->bulkout);
1684 
1685   ret = fmass_thread_delete(mass_dev);
1686   if (ret < 0)
1687     {
1688       return -1;
1689     }
1690   fmass_source_free();
1691 
1692   return 0;
1693 }
1694 
1695 static int usbclass_mass_set_alt(struct mass_dev_s *mass, unsigned intf, unsigned alt)
1696 {
1697   (void)intf;
1698   (void)alt;
1699 
1700   fmass_main_thread_signal(mass, FMASS_TASK_CONFIG_CHANGE);
1701   fmass_wakeup(mass);
1702 
1703   /* the '1' indicates that the call to 'usbd_endpoint_request' is handled within fmass's protocol */
1704 
1705   return 1;
1706 }
1707 
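/*
 * EP0 request handling. SET_CONFIGURATION and SET_INTERFACE are forwarded
 * to the worker thread as a FMASS_TASK_CONFIG_CHANGE event; the
 * class-specific Get Max LUN request answers with nluns - 1 in a single
 * byte on ep0, as required by the Bulk-Only Transport specification.
 */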
1708 static int usbclass_mass_setup(struct usbdevclass_driver_s *driver, struct usbdev_s *dev,
1709                                const struct usb_device_request *ctrl, uint8_t *dataout, size_t outlen)
1710 {
1711   struct mass_dev_s *mass;
1712   struct mass_driver_s *drvr;
1713   uint16_t w_index;
1714   uint16_t w_value;
1715   uint16_t w_length;
1716   struct usbdev_req_s *req;
1717   errno_t ret;
1718   uint32_t nlun_info;
1719 
1720   (void)dataout;
1721   (void)outlen;
1722 
1723   if (driver == NULL || ctrl == NULL || dev == NULL)
1724     {
1725       return -1;
1726     }
1727 
1728   drvr = (struct mass_driver_s *)driver;
1729   mass = drvr->dev;
1730   if (mass == NULL)
1731     {
1732       return -1;
1733     }
1734   w_index  = UGETW(ctrl->wIndex);
1735   w_value  = UGETW(ctrl->wValue);
1736   w_length = UGETW(ctrl->wLength);
1737 
1738   req = dev->ep0->handle_req;
1739 
1740   switch (ctrl->bRequest)
1741     {
1742     case USB_REQ_SET_CONFIGURATION:
1743     case USB_REQ_SET_INTERFACE:
1744       {
1745         return usbclass_mass_set_alt(mass, w_index, w_value);
1746       }
1747     case USB_BULK_GET_MAX_LUN:
1748       {
1749         if (ctrl->bmRequestType != (USB_DIR_IN |
1750             USB_TYPE_CLASS | USB_RECIP_INTERFACE))
1751           {
1752             break;
1753           }
1754 
1755         if (w_index != 0 || w_value != 0 || w_length != 1)
1756           {
1757             break;
1758           }
1759 
1760         nlun_info = mass->nluns ? (mass->nluns - 1) : (0);
1761         ret = memcpy_s(req->buf, USB_COMP_EP0_BUFSIZ, &nlun_info, sizeof(nlun_info));
1762         if (ret != EOK)
1763           {
1764             usb_err("memcpy_s failed, ret:%d\n", ret);
1765             return -1;
1766           }
1767         req->len = 1;
1768         (void)EP_SUBMIT(dev->ep0, req);
1769         PRINTK("****** USB BULK MAX LUN %u ******\n", mass->nluns);
1770       }
1771       break;
1772 
1773     default:
1774       break;
1775     }
1776   return 0;
1777 }
1778 
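/* Handle a bus disconnect: mark the device as disconnected, wake the fmass
 * main thread, and disable both bulk endpoints. A disconnect that races with
 * a configuration change is ignored here and handled by the config path.
 */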
1779 static void usbclass_mass_disconnect(struct usbdevclass_driver_s *driver, struct usbdev_s *dev)
1780 {
1781   struct mass_dev_s *mass;
1782   struct mass_driver_s *drvr;
1783   uint32_t flags;
1784 
1785   if (driver == NULL || dev == NULL)
1786     {
1787       return;
1788     }
1789 
1790   drvr = (struct mass_driver_s *)driver;
1791   mass = drvr->dev;
1792   if (mass == NULL)
1793     {
1794       return;
1795     }
1796 
1797   spin_lock_irqsave(&mass->lock, flags);
1798   if (taskstate_match(mass, FMASS_TASK_CONFIG_CHANGE))
1799     {
1800       spin_unlock_irqrestore(&mass->lock, flags);
1801       DPRINTFN(0, "Config change in progress, no need to process disconnect\n");
1802       return;
1803     }
1804 
1805   mass->dev_status = DEV_ST_DISCONNECT;
1806   spin_unlock_irqrestore(&mass->lock, flags);
1807 
1808   fmass_task_change_state(mass, FMASS_TASK_DISCONNECT);
1809   fmass_wakeup(mass);
1810 
1811   if (mass->bulk_in_enabled)
1812     {
1813       mass->bulk_in_enabled = 0;
1814       (void)EP_DISABLE(mass->bulkin);
1815     }
1816 
1817   if (mass->bulk_out_enabled)
1818     {
1819       mass->bulk_out_enabled = 0;
1820       (void)EP_DISABLE(mass->bulkout);
1821     }
1822 }
1823 
1824 struct usbd_string g_fmass_device_strings[6] =
1825 {
1826   { 0, g_dt_string_id },
1827   { 1, g_dt_string_vid },
1828   { 2, g_dt_string_pid },
1829   { 3, g_dt_string_serial },
1830   { 4, g_dt_string_buf },
1831   USBD_DEVICE_STRINGS_END
1832 };
1833 
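/* Copy the fmass device descriptor into the EP0 control request buffer. */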
1834 void mass_mkdevdesc(uint8_t *buf)
1835 {
1836   errno_t ret = memcpy_s(buf, USB_COMP_EP0_BUFSIZ, &g_fmass_device_desc, sizeof(g_fmass_device_desc));
1837   if (ret != EOK)
1838     {
1839       usb_err("memcpy_s failed, ret:%d\n", ret);
1840       return;
1841     }
1842 }
1843 
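/* Copy the full configuration descriptor set (configuration, interface and
 * endpoint descriptors) into the EP0 buffer and return its total length.
 * The devinfo argument is not used by this function.
 */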
1844 int16_t mass_mkcfgdesc(uint8_t *buf, struct usbdev_devinfo_s *devinfo)
1845 {
1846   uint16_t total = UGETW(g_fmass_confd.confd.wTotalLength);
1847   errno_t ret;
1848 
1849   ret = memcpy_s(buf, USB_COMP_EP0_BUFSIZ, &g_fmass_confd, total);
1850   if (ret != EOK)
1851     {
1852       usb_err("memcpy_s failed, ret:%d\n", ret);
1853       return -1;
1854     }
1855 
1856   return (int16_t)total;
1857 }
1858 
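/* Look up the string descriptor with the given id in g_fmass_device_strings
 * and copy it into the EP0 buffer. The first byte of each descriptor holds
 * its length, which is also the return value on success.
 */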
1859 int mass_mkstrdesc(uint8_t id, uint8_t *buf)
1860 {
1861   errno_t ret;
1862   const char *str;
1863   int i;
1864 
1865   for (i = 0; g_fmass_device_strings[i].s != NULL; i++)
1866     {
1867       str = g_fmass_device_strings[i].s;
1868       if (g_fmass_device_strings[i].id == id)
1869         {
1870           ret = memcpy_s(buf, USB_COMP_EP0_BUFSIZ, str, str[0]);
1871           if (ret != EOK)
1872             {
1873               usb_err("memcpy_s failed, ret = %d\n", ret);
1874               return -1;
1875             }
1876           return str[0];
1877         }
1878     }
1879 
1880   usb_err("Cannot find string descriptor with id = %u\n", id);
1881   return -1;
1882 }
1883 
1884 #define MASS_NCONFIGS    1
1885 #define MASS_CONFIGID    0
1886 #define MASS_NINTERFACES 1
1887 #define MASS_NSTRIDS     5
1888 #define MASS_NUM_EPS     2
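/* Fill in the composite device description for the mass storage function:
 * the descriptor callbacks plus the number of configurations, interfaces,
 * strings and endpoints it uses.
 */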
1889 void mass_get_composite_devdesc(struct composite_devdesc_s *dev)
1890 {
1891   (void)memset_s(dev, sizeof(struct composite_devdesc_s), 0, sizeof(struct composite_devdesc_s));
1892 
1893   dev->mkdevdesc  = mass_mkdevdesc;
1894   dev->mkconfdesc = mass_mkcfgdesc;
1895   dev->mkstrdesc  = mass_mkstrdesc;
1896 
1897   dev->nconfigs = MASS_NCONFIGS;           /* Number of configurations supported */
1898   dev->configid = MASS_CONFIGID;           /* The only supported configuration ID */
1899 
1900   /* Interfaces.
1901    *
1902    * ifnobase must be provided by board-specific logic
1903    */
1904 
1905   dev->devinfo.ninterfaces = MASS_NINTERFACES; /* Number of interfaces in the configuration */
1906 
1907   /* Strings.
1908    *
1909    * strbase must be provided by board-specific logic
1910    */
1911 
1912   dev->devinfo.nstrings = MASS_NSTRIDS;     /* Number of Strings */
1913 
1914   /* Endpoints.
1915    *
1916    * Endpoint numbers must be provided by board-specific logic.
1917    */
1918 
1919   dev->devinfo.nendpoints = MASS_NUM_EPS;
1920 }
1921 
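/* Allocate the combined driver state (struct mass_softc), record it in
 * g_fmass, and return the embedded class driver structure to the composite
 * layer through 'classdev'.
 */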
1922 int mass_classobject(int minor, struct usbdev_devinfo_s *devinfo,
1923                      struct usbdevclass_driver_s **classdev)
1924 {
1925   struct mass_softc *mass_s;
1926   struct mass_dev_s *priv;
1927   struct mass_driver_s *drvr;
1928 
1929   (void)minor;
1930   (void)devinfo;
1931 
1932   /* Allocate the structures needed */
1933 
1934   mass_s = (struct mass_softc *)malloc(sizeof(struct mass_softc));
1935   if (mass_s == NULL)
1936     {
1937       return -1;
1938     }
1939   g_fmass = mass_s;
1940 
1941   /* Convenience pointers into the allocated blob */
1942 
1943   priv = &mass_s->dev;
1944   drvr = &mass_s->drvr;
1945 
1946   /* Initialize the USB mass storage device structure */
1947 
1948   (void)memset_s(priv, sizeof(struct mass_dev_s), 0, sizeof(struct mass_dev_s));
1949   spin_lock_init(&priv->lock);
1950 
1951   /* Initialize the USB class driver structure */
1952 
1953   drvr->drvr.speed = USB_SPEED_HIGH;
1954   drvr->drvr.ops   = &g_mass_driverops;
1955   drvr->dev        = priv;
1956 
1957   *classdev = &drvr->drvr;
1958   return 0;
1959 }
1960 
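/* Free the driver state allocated by mass_classobject(). */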
1961 void mass_uninitialize(struct usbdevclass_driver_s *classdev)
1962 {
1963   struct mass_driver_s *mass_drvr = (struct mass_driver_s *)classdev;
1964   struct mass_dev_s *priv;
1965   struct mass_softc *mass_s;
1966 
1967   if (mass_drvr == NULL)
1968     {
1969       return;
1970     }
1971 
1972   priv = mass_drvr->dev;
1973   if (priv == NULL)
1974     {
1975       return;
1976     }
1977 
1978   mass_s = container_of(mass_drvr, struct mass_softc, drvr);
1979   free(mass_s);
1980 }
1981 
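/* Build the composite device description for one mass storage instance, then
 * patch in the class object callbacks, the interface offset and the minor
 * number supplied by the caller.
 */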
1982 void usbdev_mass_initialize_sub(struct composite_devdesc_s *dev, int ifnobase, int minor)
1983 {
1984   /* Ask the mass storage driver to fill in the constants we didn't
1985    * know here.
1986    */
1987 
1988   mass_get_composite_devdesc(dev);
1989 
1990   /* Overwrite and correct some values... */
1991   /* The callback functions for the mass storage class */
1992 
1993   dev->classobject  = mass_classobject;
1994   dev->uninitialize = mass_uninitialize;
1995 
1996   /* Interfaces */
1997 
1998   dev->devinfo.ifnobase = ifnobase; /* Offset to Interface-IDs */
1999   dev->minor            = minor;    /* The minor interface number */
2000 
2001   /* Strings */
2002 
2003   dev->devinfo.strbase = 0;         /* Offset to String Numbers */
2004 }
2005 
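/* Module initialization entry: describe a single mass storage function and
 * register it with the composite framework via composite_initialize().
 */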
2006 int usbdev_mass_initialize(struct module *mod, int n, void *arg)
2007 {
2008   struct composite_softc *com_s = (struct composite_softc *)arg;
2009   struct composite_devdesc_s dev;
2010   int ret;
2011 
2012   (void)mod;
2013   (void)n;
2014   if (com_s == NULL)
2015     {
2016       return -1;
2017     }
2018 
2019   usbdev_mass_initialize_sub(&dev, 0, DEV_MASS);
2020 
2021   ret = composite_initialize(com_s, 1, &dev);
2022   if (ret < 0)
2023     {
2024       return -1;
2025     }
2026 
2027   PRINTK("  ** Mass device initialized successfully! **\n");
2028   return 0;
2029 }
2030 
2031 #undef USB_DEBUG_VAR
2032 
2033 #ifdef __cplusplus
2034 #if __cplusplus
2035 }
2036 #endif /* __cplusplus */
2037 #endif /* __cplusplus */