| /kernel/linux/linux-4.19/drivers/s390/char/ |
| D | tape_std.c |
      38  struct tape_request * request = from_timer(request, t, timer);   in tape_std_assign_timeout() local
      39  struct tape_device * device = request->device;   in tape_std_assign_timeout()
      46  rc = tape_cancel_io(device, request);   in tape_std_assign_timeout()
      57  struct tape_request *request;   in tape_std_assign() local
      59  request = tape_alloc_request(2, 11);   in tape_std_assign()
      60  if (IS_ERR(request))   in tape_std_assign()
      61  return PTR_ERR(request);   in tape_std_assign()
      63  request->op = TO_ASSIGN;   in tape_std_assign()
      64  tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);   in tape_std_assign()
      65  tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);   in tape_std_assign()
      [all …]
|
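The tape_std_assign_timeout() hits above show the from_timer() idiom: a timer
callback receives only the struct timer_list pointer and recovers the
enclosing object from it. A minimal sketch of the same pattern, assuming
nothing beyond the standard <linux/timer.h> API (my_device, my_timeout and
the one-second delay are invented for illustration):

    #include <linux/kernel.h>
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_device {
            struct timer_list timer;        /* embedded timer */
            int id;
    };

    static void my_timeout(struct timer_list *t)
    {
            /* from_timer() is container_of() specialized for timers: it
             * maps &my_device.timer back to the containing my_device. */
            struct my_device *dev = from_timer(dev, t, timer);

            pr_info("device %d timed out\n", dev->id);
    }

    static void my_start_timer(struct my_device *dev)
    {
            timer_setup(&dev->timer, my_timeout, 0);
            mod_timer(&dev->timer, jiffies + HZ);   /* fire in ~1 second */
    }

tape_std_assign() arms a timer like this before starting the ASSIGN channel
program, so a control unit that never answers gets its I/O cancelled via
tape_cancel_io() instead of blocking the request forever.
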
| D | tape_34xx.c |
      56  static void __tape_34xx_medium_sense(struct tape_request *request)   in __tape_34xx_medium_sense() argument
      58  struct tape_device *device = request->device;   in __tape_34xx_medium_sense()
      61  if (request->rc == 0) {   in __tape_34xx_medium_sense()
      62  sense = request->cpdata;   in __tape_34xx_medium_sense()
      81  request->rc);   in __tape_34xx_medium_sense()
      82  tape_free_request(request);   in __tape_34xx_medium_sense()
      87  struct tape_request *request;   in tape_34xx_medium_sense() local
      90  request = tape_alloc_request(1, 32);   in tape_34xx_medium_sense()
      91  if (IS_ERR(request)) {   in tape_34xx_medium_sense()
      93  return PTR_ERR(request);   in tape_34xx_medium_sense()
      [all …]
|
| D | tape_core.c |
      292  __tape_cancel_io(struct tape_device *device, struct tape_request *request)   in __tape_cancel_io() argument
      298  if (request->callback == NULL)   in __tape_cancel_io()
      303  rc = ccw_device_clear(device->cdev, (long) request);   in __tape_cancel_io()
      307  request->status = TAPE_REQUEST_DONE;   in __tape_cancel_io()
      310  request->status = TAPE_REQUEST_CANCEL;   in __tape_cancel_io()
      434  * request. We refuse to suspend if the device is loaded or in use for the
      483  * request. We may prevent this by returning an error.
      641  struct tape_request * request;   in __tape_discard_requests() local
      645  request = list_entry(l, struct tape_request, list);   in __tape_discard_requests()
      646  if (request->status == TAPE_REQUEST_IN_IO)   in __tape_discard_requests()
      [all …]
|
| D | tape_3590.c |
      204  struct tape_request *request;   in tape_3592_kekl_query() local
      213  request = tape_alloc_request(2, sizeof(*order));   in tape_3592_kekl_query()
      214  if (IS_ERR(request)) {   in tape_3592_kekl_query()
      215  rc = PTR_ERR(request);   in tape_3592_kekl_query()
      218  order = request->cpdata;   in tape_3592_kekl_query()
      222  request->op = TO_KEKL_QUERY;   in tape_3592_kekl_query()
      223  tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);   in tape_3592_kekl_query()
      224  tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),   in tape_3592_kekl_query()
      226  rc = tape_do_io(device, request);   in tape_3592_kekl_query()
      233  tape_free_request(request);   in tape_3592_kekl_query()
      [all …]
|
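All four tape drivers above build channel programs the same way:
tape_alloc_request(cplength, datasize) allocates the request plus room for
its CCW chain and data, the CCWs are chained with tape_ccw_cc()/tape_ccw_end(),
tape_do_io() runs the program, and tape_free_request() releases it. A sketch
of the smallest possible instance of that pattern (my_tape_nop is invented;
TO_NOP, NOP and the helpers are the ones visible in the excerpts):

    static int my_tape_nop(struct tape_device *device)
    {
            struct tape_request *request;
            int rc;

            /* one CCW in the channel program, no extra cpdata */
            request = tape_alloc_request(1, 0);
            if (IS_ERR(request))
                    return PTR_ERR(request);

            request->op = TO_NOP;
            /* single, and therefore final, CCW: no command chaining */
            tape_ccw_end(request->cpaddr, NOP, 0, NULL);

            rc = tape_do_io(device, request);   /* submit and wait */
            tape_free_request(request);
            return rc;
    }
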
| /kernel/linux/linux-5.10/drivers/s390/char/ |
| D | tape_std.c |
      38  struct tape_request * request = from_timer(request, t, timer);   in tape_std_assign_timeout() local
      39  struct tape_device * device = request->device;   in tape_std_assign_timeout()
      46  rc = tape_cancel_io(device, request);   in tape_std_assign_timeout()
      57  struct tape_request *request;   in tape_std_assign() local
      59  request = tape_alloc_request(2, 11);   in tape_std_assign()
      60  if (IS_ERR(request))   in tape_std_assign()
      61  return PTR_ERR(request);   in tape_std_assign()
      63  request->op = TO_ASSIGN;   in tape_std_assign()
      64  tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);   in tape_std_assign()
      65  tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);   in tape_std_assign()
      [all …]
|
| D | tape_34xx.c |
      56  static void __tape_34xx_medium_sense(struct tape_request *request)   in __tape_34xx_medium_sense() argument
      58  struct tape_device *device = request->device;   in __tape_34xx_medium_sense()
      61  if (request->rc == 0) {   in __tape_34xx_medium_sense()
      62  sense = request->cpdata;   in __tape_34xx_medium_sense()
      81  request->rc);   in __tape_34xx_medium_sense()
      82  tape_free_request(request);   in __tape_34xx_medium_sense()
      87  struct tape_request *request;   in tape_34xx_medium_sense() local
      90  request = tape_alloc_request(1, 32);   in tape_34xx_medium_sense()
      91  if (IS_ERR(request)) {   in tape_34xx_medium_sense()
      93  return PTR_ERR(request);   in tape_34xx_medium_sense()
      [all …]
|
| D | tape_core.c |
      292  __tape_cancel_io(struct tape_device *device, struct tape_request *request)   in __tape_cancel_io() argument
      298  if (request->callback == NULL)   in __tape_cancel_io()
      303  rc = ccw_device_clear(device->cdev, (long) request);   in __tape_cancel_io()
      307  request->status = TAPE_REQUEST_DONE;   in __tape_cancel_io()
      310  request->status = TAPE_REQUEST_CANCEL;   in __tape_cancel_io()
      434  * request. We refuse to suspend if the device is loaded or in use for the
      483  * request. We may prevent this by returning an error.
      641  struct tape_request * request;   in __tape_discard_requests() local
      645  request = list_entry(l, struct tape_request, list);   in __tape_discard_requests()
      646  if (request->status == TAPE_REQUEST_IN_IO)   in __tape_discard_requests()
      [all …]
|
| D | tape_3590.c |
      204  struct tape_request *request;   in tape_3592_kekl_query() local
      213  request = tape_alloc_request(2, sizeof(*order));   in tape_3592_kekl_query()
      214  if (IS_ERR(request)) {   in tape_3592_kekl_query()
      215  rc = PTR_ERR(request);   in tape_3592_kekl_query()
      218  order = request->cpdata;   in tape_3592_kekl_query()
      222  request->op = TO_KEKL_QUERY;   in tape_3592_kekl_query()
      223  tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);   in tape_3592_kekl_query()
      224  tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),   in tape_3592_kekl_query()
      226  rc = tape_do_io(device, request);   in tape_3592_kekl_query()
      233  tape_free_request(request);   in tape_3592_kekl_query()
      [all …]
|
| /kernel/linux/linux-4.19/drivers/gpu/drm/i915/ |
| D | i915_request.h |
      45  struct i915_request *request;   member
      60  * Request queue structure.
      62  * The request queue allows us to note sequence numbers that have been emitted
      67  * emission time to be associated with the request for tracking how far ahead
      83  /** On Which ring this request was generated */
      87  * Context and ring buffer related to this request
      88  * Contexts are refcounted, so when this request is associated with a
      90  * it persists while any request is linked to it. Requests themselves
      91  * are also refcounted, so the request will only be freed when the last
      104  * Fences for the various phases in the request's lifetime.
      [all …]
|
| D | i915_request.c |
      42  * may be freed when the request is no longer in use by the GPU.   in i915_fence_get_timeline_name()
      77  * The request is put onto a RCU freelist (i.e. the address   in i915_fence_release()
      98  i915_request_remove_from_client(struct i915_request *request)   in i915_request_remove_from_client() argument
      102  file_priv = request->file_priv;   in i915_request_remove_from_client()
      107  if (request->file_priv) {   in i915_request_remove_from_client()
      108  list_del(&request->client_link);   in i915_request_remove_from_client()
      109  request->file_priv = NULL;   in i915_request_remove_from_client()
      268  * By incrementing the serial for every request, we know that no   in reserve_gt()
      271  * of every request from all engines onto just one.   in reserve_gt()
      295  struct i915_request *request)   in i915_gem_retire_noop() argument
      [all …]
|
| /kernel/linux/linux-5.10/include/media/ |
| D | media-request.h |
      3   * Media device request objects
      23  * enum media_request_state - media request state
      26  * @MEDIA_REQUEST_STATE_VALIDATING: Validating the request, no state changes
      29  * @MEDIA_REQUEST_STATE_COMPLETE: Completed, the request is done
      30  * @MEDIA_REQUEST_STATE_CLEANING: Cleaning, the request is being re-inited
      31  * @MEDIA_REQUEST_STATE_UPDATING: The request is being updated, i.e.
      32  * request objects are being added,
      34  * @NR_OF_MEDIA_REQUEST_STATE: The number of media request states, used
      50  * struct media_request - Media device request
      51  * @mdev: Media device this request belongs to
      [all …]
|
| /kernel/linux/linux-4.19/include/linux/ |
| D | elevator.h |
      26  typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
      29  typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
      31  typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
      34  struct request *, struct bio *);
      37  struct request *, struct request *);
      40  struct request *, struct bio *);
      44  typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
      45  typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
      46  typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
      51  typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
      [all …]
|
| /kernel/linux/linux-5.10/drivers/greybus/ |
| D | svc.c |
      175  struct gb_svc_pwrmon_sample_get_request request;   in gb_svc_pwrmon_sample_get() local
      179  request.rail_id = rail_id;   in gb_svc_pwrmon_sample_get()
      180  request.measurement_type = measurement_type;   in gb_svc_pwrmon_sample_get()
      183  &request, sizeof(request),   in gb_svc_pwrmon_sample_get()
      212  struct gb_svc_pwrmon_intf_sample_get_request request;   in gb_svc_pwrmon_intf_sample_get() local
      216  request.intf_id = intf_id;   in gb_svc_pwrmon_intf_sample_get()
      217  request.measurement_type = measurement_type;   in gb_svc_pwrmon_intf_sample_get()
      221  &request, sizeof(request),   in gb_svc_pwrmon_intf_sample_get()
      259  struct gb_svc_intf_device_id_request request;   in gb_svc_intf_device_id() local
      261  request.intf_id = intf_id;   in gb_svc_intf_device_id()
      [all …]
|
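Every helper in svc.c above follows one pattern: fill a packed wire-format
request structure, then exchange it synchronously over the connection with
gb_operation_sync(). A sketch of that pattern with an invented operation
(gb_operation_sync() is the real greybus helper; everything prefixed my_ or
GB_MY_ is hypothetical, and the include path matches the 5.10 tree, while the
4.19 staging copy uses a local greybus.h):

    #include <linux/greybus.h>

    #define GB_MY_TYPE_SET_MODE 0x42        /* hypothetical operation type */

    struct my_set_mode_request {
            __u8 intf_id;
            __u8 mode;
    } __packed;

    static int my_svc_set_mode(struct gb_connection *connection,
                               u8 intf_id, u8 mode)
    {
            struct my_set_mode_request request;

            request.intf_id = intf_id;
            request.mode = mode;

            /* request payload out, no response payload expected back */
            return gb_operation_sync(connection, GB_MY_TYPE_SET_MODE,
                                     &request, sizeof(request), NULL, 0);
    }
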
| /kernel/linux/linux-4.19/drivers/staging/greybus/ |
| D | svc.c |
      177  struct gb_svc_pwrmon_sample_get_request request;   in gb_svc_pwrmon_sample_get() local
      181  request.rail_id = rail_id;   in gb_svc_pwrmon_sample_get()
      182  request.measurement_type = measurement_type;   in gb_svc_pwrmon_sample_get()
      185  &request, sizeof(request),   in gb_svc_pwrmon_sample_get()
      214  struct gb_svc_pwrmon_intf_sample_get_request request;   in gb_svc_pwrmon_intf_sample_get() local
      218  request.intf_id = intf_id;   in gb_svc_pwrmon_intf_sample_get()
      219  request.measurement_type = measurement_type;   in gb_svc_pwrmon_intf_sample_get()
      223  &request, sizeof(request),   in gb_svc_pwrmon_intf_sample_get()
      261  struct gb_svc_intf_device_id_request request;   in gb_svc_intf_device_id() local
      263  request.intf_id = intf_id;   in gb_svc_intf_device_id()
      [all …]
|
| /kernel/linux/linux-4.19/drivers/gpu/drm/i915/selftests/ |
| D | i915_request.c |
      35  struct i915_request *request;   in igt_add_request() local
      38  /* Basic preliminary test to create a request and let it loose! */   in igt_add_request()
      41  request = mock_request(i915->engine[RCS],   in igt_add_request()
      44  if (!request)   in igt_add_request()
      47  i915_request_add(request);   in igt_add_request()
      59  struct i915_request *request;   in igt_wait_request() local
      62  /* Submit a request, then wait upon it */   in igt_wait_request()
      65  request = mock_request(i915->engine[RCS], i915->kernel_context, T);   in igt_wait_request()
      66  if (!request) {   in igt_wait_request()
      71  if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {   in igt_wait_request()
      [all …]
|
| /kernel/linux/linux-4.19/fs/nfsd/ |
| D | xdr4.h |
      89   u32 ac_req_access;   /* request */
      95   u32 cl_seqid;   /* request */
      96   stateid_t cl_stateid;   /* request+response */
      100  u64 co_offset;   /* request */
      101  u32 co_count;   /* request */
      106  u32 cr_namelen;   /* request */
      107  char * cr_name;   /* request */
      108  u32 cr_type;   /* request */
      109  union {   /* request */
      120  u32 cr_bmval[3];   /* request */
      [all …]
|
| /kernel/linux/linux-5.10/fs/nfsd/ |
| D | xdr4.h |
      89   u32 ac_req_access;   /* request */
      95   u32 cl_seqid;   /* request */
      96   stateid_t cl_stateid;   /* request+response */
      100  u64 co_offset;   /* request */
      101  u32 co_count;   /* request */
      106  u32 cr_namelen;   /* request */
      107  char * cr_name;   /* request */
      108  u32 cr_type;   /* request */
      109  union {   /* request */
      120  u32 cr_bmval[3];   /* request */
      [all …]
|
| /kernel/linux/linux-5.10/Documentation/userspace-api/media/mediactl/ |
| D | request-api.rst |
      4   .. _media-request-api:
      6   Request API
      9   The Request API has been designed to allow V4L2 to deal with requirements of
      19  Supporting these features without the Request API is not always possible and if
      26  The Request API allows a specific configuration of the pipeline (media
      31  of request completion are also available for reading.
      36  The Request API extends the Media Controller API and cooperates with
      37  subsystem-specific APIs to support request usage. At the Media Controller
      39  node. Their life cycle is then managed through the request file descriptors in
      42  request support, such as V4L2 APIs that take an explicit ``request_fd``
      [all …]
|
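In userspace, the flow this page describes starts by allocating a request fd
from the media device node and then tagging configuration with it. A sketch
of those first two steps, assuming a V4L2 driver with request support (the
control ID is a placeholder; the ioctls and the request_fd plumbing are the
documented API, and queueing plus polling follow after the next entry):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>
    #include <linux/videodev2.h>

    /* Returns a request fd with one control stored in it, or -1. */
    int prepare_request(int media_fd, int video_fd, int value)
    {
            int req_fd;
            struct v4l2_ext_control ctrl;
            struct v4l2_ext_controls ctrls;

            /* Ask the media device for a new, empty request. */
            if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd) < 0)
                    return -1;

            memset(&ctrl, 0, sizeof(ctrl));
            ctrl.id = V4L2_CID_BRIGHTNESS;  /* placeholder control */
            ctrl.value = value;

            memset(&ctrls, 0, sizeof(ctrls));
            ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;  /* store in request */
            ctrls.request_fd = req_fd;
            ctrls.count = 1;
            ctrls.controls = &ctrl;

            /* Not applied now: applied when the request is queued and run. */
            if (ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
                    return -1;

            return req_fd;
    }
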
| D | media-request-ioc-queue.rst |
      13  MEDIA_REQUEST_IOC_QUEUE - Queue a request
      31  If the media device supports :ref:`requests <media-request-api>`, then
      32  this request ioctl can be used to queue a previously allocated request.
      34  If the request was successfully queued, then the file descriptor can be
      35  :ref:`polled <request-func-poll>` to wait for the request to complete.
      37  If the request was already queued before, then ``EBUSY`` is returned.
      38  Other errors can be returned if the contents of the request contained
      40  common error codes. On error both the request and driver state are unchanged.
      42  Once a request is queued, then the driver is required to gracefully handle
      43  errors that occur when the request is applied to the hardware. The
      [all …]
|
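A companion sketch for this page: queue the prepared request, wait for
completion as an exceptional (POLLPRI) event, then recycle the fd. EBUSY on
the queue ioctl corresponds to the "already queued" case described above,
and MEDIA_REQUEST_IOC_REINIT is the documented way to reuse a completed
request:

    #include <poll.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    int queue_and_wait(int req_fd)
    {
            struct pollfd pfd = { .fd = req_fd, .events = POLLPRI };

            if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
                    return -1;      /* e.g. EBUSY if queued twice */

            if (poll(&pfd, 1, -1) < 0)
                    return -1;

            /* Completed: re-init so the same fd can describe the next
             * frame, instead of allocating a fresh request. */
            return ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
    }
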
| /kernel/linux/linux-5.10/sound/soc/intel/catpt/ |
| D | messages.c |
      17   struct catpt_ipc_msg request = {{0}}, reply;   in catpt_ipc_get_fw_version() local
      20   request.header = msg.val;   in catpt_ipc_get_fw_version()
      24   ret = catpt_dsp_send_msg(cdev, request, &reply);   in catpt_ipc_get_fw_version()
      58   struct catpt_ipc_msg request, reply;   in catpt_ipc_alloc_stream() local
      91   request.header = msg.val;   in catpt_ipc_alloc_stream()
      92   request.size = size;   in catpt_ipc_alloc_stream()
      93   request.data = payload;   in catpt_ipc_alloc_stream()
      97   ret = catpt_dsp_send_msg(cdev, request, &reply);   in catpt_ipc_alloc_stream()
      109  struct catpt_ipc_msg request;   in catpt_ipc_free_stream() local
      112  request.header = msg.val;   in catpt_ipc_free_stream()
      [all …]
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | elevator.h |
      36  bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
      38  int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
      39  void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
      40  void (*requests_merged)(struct request_queue *, struct request *, struct request *);
      42  void (*prepare_request)(struct request *);
      43  void (*finish_request)(struct request *);
      45  struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
      47  void (*completed_request)(struct request *, u64);
      48  void (*requeue_request)(struct request *);
      49  struct request *(*former_request)(struct request_queue *, struct request *);
      [all …]
|
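These elevator_mq_ops hooks are how a blk-mq I/O scheduler plugs into the
block layer. A minimal sketch of a scheduler wired through them, assuming
the 5.10 elevator API ("myfifo" is invented; a real scheduler would also
honor at_head, implement merging, and keep per-hctx data the way
mq-deadline and kyber do):

    #include <linux/module.h>
    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>
    #include <linux/elevator.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    /* Single FIFO shared by all hardware queues. */
    struct myfifo_data {
            spinlock_t lock;
            struct list_head queue;
    };

    static int myfifo_init_sched(struct request_queue *q,
                                 struct elevator_type *e)
    {
            struct elevator_queue *eq;
            struct myfifo_data *fd;

            eq = elevator_alloc(q, e);
            if (!eq)
                    return -ENOMEM;

            fd = kzalloc(sizeof(*fd), GFP_KERNEL);
            if (!fd) {
                    kobject_put(&eq->kobj);
                    return -ENOMEM;
            }
            spin_lock_init(&fd->lock);
            INIT_LIST_HEAD(&fd->queue);

            eq->elevator_data = fd;
            q->elevator = eq;
            return 0;
    }

    static void myfifo_exit_sched(struct elevator_queue *e)
    {
            kfree(e->elevator_data);
    }

    /* Take ownership of the requests blk-mq hands us (at_head ignored). */
    static void myfifo_insert_requests(struct blk_mq_hw_ctx *hctx,
                                       struct list_head *list, bool at_head)
    {
            struct myfifo_data *fd = hctx->queue->elevator->elevator_data;

            spin_lock(&fd->lock);
            list_splice_tail_init(list, &fd->queue);
            spin_unlock(&fd->lock);
    }

    /* Hand the oldest request back to blk-mq, or NULL if idle. */
    static struct request *myfifo_dispatch_request(struct blk_mq_hw_ctx *hctx)
    {
            struct myfifo_data *fd = hctx->queue->elevator->elevator_data;
            struct request *rq = NULL;

            spin_lock(&fd->lock);
            if (!list_empty(&fd->queue)) {
                    rq = list_first_entry(&fd->queue, struct request,
                                          queuelist);
                    list_del_init(&rq->queuelist);
            }
            spin_unlock(&fd->lock);
            return rq;
    }

    static bool myfifo_has_work(struct blk_mq_hw_ctx *hctx)
    {
            struct myfifo_data *fd = hctx->queue->elevator->elevator_data;

            return !list_empty_careful(&fd->queue);
    }

    static struct elevator_type myfifo_sched = {
            .ops = {
                    .init_sched       = myfifo_init_sched,
                    .exit_sched       = myfifo_exit_sched,
                    .insert_requests  = myfifo_insert_requests,
                    .dispatch_request = myfifo_dispatch_request,
                    .has_work         = myfifo_has_work,
            },
            .elevator_name  = "myfifo",
            .elevator_owner = THIS_MODULE,
    };

    static int __init myfifo_init(void)
    {
            return elv_register(&myfifo_sched);
    }
    module_init(myfifo_init);

    static void __exit myfifo_exit(void)
    {
            elv_unregister(&myfifo_sched);
    }
    module_exit(myfifo_exit);
    MODULE_LICENSE("GPL");

Once registered, such a scheduler becomes selectable through
/sys/block/<dev>/queue/scheduler like mq-deadline, bfq and kyber.
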
| /kernel/linux/linux-5.10/drivers/gpu/drm/i915/ |
| D | i915_request.h |
      62   * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
      65   * by __i915_request_unsubmit() if we preempt this request.
      67   * Finally cleared for consistency on retiring the request, when
      68   * we know the HW is no longer running this request.
      75   * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
      77   * Using the scheduler, when a request is ready for execution it is put
      87   * I915_FENCE_FLAG_HOLD - this request is currently on hold
      89   * This request has been suspended, pending an ongoing investigation.
      94   * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
      101  * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
      [all …]
|
| /kernel/linux/linux-4.19/drivers/gpu/drm/amd/display/dc/i2caux/ |
| D | aux_engine.c |
      103  struct aux_request_transaction_data request;   member
      188  engine->funcs->submit_channel_request(engine, &ctx->request);   in process_read_request()
      228  * "S3: AUX Request CMD PENDING:   in process_read_request()
      246  struct i2caux_transaction_request *request,   in read_command() argument
      251  ctx.buffer = request->payload.data;   in read_command()
      252  ctx.current_read_length = request->payload.length;   in read_command()
      262  if (request->payload.address_space ==   in read_command()
      264  ctx.request.type = AUX_TRANSACTION_TYPE_DP;   in read_command()
      265  ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;   in read_command()
      266  ctx.request.address = request->payload.address;   in read_command()
      [all …]
|
| /kernel/linux/linux-5.10/block/ |
| D | blk-crypto-internal.h |
      26  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
      31  static inline bool bio_crypt_ctx_back_mergeable(struct request *req,   in bio_crypt_ctx_back_mergeable()
      38  static inline bool bio_crypt_ctx_front_mergeable(struct request *req,   in bio_crypt_ctx_front_mergeable()
      45  static inline bool bio_crypt_ctx_merge_rq(struct request *req,   in bio_crypt_ctx_merge_rq()
      46  struct request *next)   in bio_crypt_ctx_merge_rq()
      52  static inline void blk_crypto_rq_set_defaults(struct request *rq)   in blk_crypto_rq_set_defaults()
      58  static inline bool blk_crypto_rq_is_encrypted(struct request *rq)   in blk_crypto_rq_is_encrypted()
      65  static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,   in bio_crypt_rq_ctx_compatible()
      71  static inline bool bio_crypt_ctx_front_mergeable(struct request *req,   in bio_crypt_ctx_front_mergeable()
      77  static inline bool bio_crypt_ctx_back_mergeable(struct request *req,   in bio_crypt_ctx_back_mergeable()
      [all …]
|
| /kernel/linux/linux-5.10/drivers/usb/musb/ |
| D | musb_gadget.c |
      32  static inline void map_dma_buffer(struct musb_request *request,   in map_dma_buffer() argument
      38  request->map_state = UN_MAPPED;   in map_dma_buffer()
      43  /* Check if DMA engine can handle this request.   in map_dma_buffer()
      44  * DMA code must reject the USB request explicitly.   in map_dma_buffer()
      45  * Default behaviour is to map the request.   in map_dma_buffer()
      49  musb_ep->packet_sz, request->request.buf,   in map_dma_buffer()
      50  request->request.length);   in map_dma_buffer()
      54  if (request->request.dma == DMA_ADDR_INVALID) {   in map_dma_buffer()
      60  request->request.buf,   in map_dma_buffer()
      61  request->request.length,   in map_dma_buffer()
      [all …]
|
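map_dma_buffer() above runs on the controller side, deciding whether the
buffer of a queued struct usb_request should be DMA-mapped. For context, a
sketch of how the gadget-function side produces such a request in the first
place (hypothetical names; usb_ep_alloc_request()/usb_ep_queue() are the
real gadget API):

    #include <linux/errno.h>
    #include <linux/usb/gadget.h>

    /* Queue len bytes on an IN endpoint; done runs on completion and is
     * responsible for usb_ep_free_request() or recycling the request. */
    static int my_queue_in(struct usb_ep *ep, void *data, unsigned int len,
                           void (*done)(struct usb_ep *ep,
                                        struct usb_request *req))
    {
            struct usb_request *req;

            req = usb_ep_alloc_request(ep, GFP_ATOMIC);
            if (!req)
                    return -ENOMEM;

            req->buf = data;
            req->length = len;
            req->complete = done;

            /* The UDC driver (musb here) now chooses PIO or DMA and, for
             * DMA, maps req->buf exactly as map_dma_buffer() shows. */
            return usb_ep_queue(ep, req, GFP_ATOMIC);
    }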