/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 * Copyright (c) 2012, Spectra Logic Corporation
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include "ring.h"
#include "../grant_table.h"

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */
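
/*
 * Illustrative sketch (not part of this interface): how a frontend might
 * drain responses while re-arming the rsp_event hold-off described above.
 * Here "ring" is assumed to be a blkif_front_ring_t (generated by
 * DEFINE_RING_TYPES() near the end of this header), "complete_io" is a
 * hypothetical frontend completion routine, and xen_rmb() is the read
 * barrier that ring.h expects the environment to provide.
 *
 *     RING_IDX cons, prod;
 *     int more_to_do;
 *
 *     do {
 *         prod = ring->sring->rsp_prod;
 *         xen_rmb();                  // make responses up to prod visible
 *         for (cons = ring->rsp_cons; cons != prod; cons++) {
 *             blkif_response_t *rsp = RING_GET_RESPONSE(ring, cons);
 *             complete_io(rsp->id, rsp->status);
 *         }
 *         ring->rsp_cons = cons;
 *         // Re-arm rsp_event and pick up responses that raced with us.
 *         RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do);
 *     } while (more_to_do);
 */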

#ifndef blkif_vdev_t
#define blkif_vdev_t   UINT16
#endif
#define blkif_sector_t UINT64

/*
 * Feature and Parameter Negotiation
 * =================================
 * The two halves of a Xen block driver utilize nodes within the XenStore to
 * communicate capabilities and to negotiate operating parameters.  This
 * section enumerates these nodes, which reside in the respective front and
 * backend portions of the XenStore, following the XenBus convention.
 *
 * All data in the XenStore is stored as strings.  Nodes specifying numeric
 * values are encoded in decimal.  Integer value ranges listed below are
 * expressed as fixed sized integer types capable of storing the conversion
 * of a properly formatted node string, without loss of information.
 *
 * Any specified default value is in effect if the corresponding XenBus node
 * is not present in the XenStore.
 *
 * XenStore nodes in sections marked "PRIVATE" are solely for use by the
 * driver side whose XenBus tree contains them.
 *
 * XenStore nodes marked "DEPRECATED" in their notes section should only be
 * used to provide interoperability with legacy implementations.
 *
 * See the XenBus state transition diagram below for details on when XenBus
 * nodes must be published and when they can be queried.
 *
 *****************************************************************************
 *                            Backend XenBus Nodes
 *****************************************************************************
 *
 *------------------ Backend Device Identification (PRIVATE) ------------------
 *
 * mode
 *      Values:         "r" (read only), "w" (writable)
 *
 *      The read or write access permissions to the backing store to be
 *      granted to the frontend.
 *
 * params
 *      Values:         string
 *
 *      A free-form string providing sufficient information for the
 *      backend driver to open the backing device.  (e.g. the path to the
 *      file or block device representing the backing store.)
 *
 * type
 *      Values:         "file", "phy", "tap"
 *
 *      The type of the backing device/object.
 *
 *--------------------------------- Features ---------------------------------
 *
 * feature-barrier
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      A value of "1" indicates that the backend can process requests
 *      containing the BLKIF_OP_WRITE_BARRIER request opcode.  Requests
 *      of this type may still be returned at any time with the
 *      BLKIF_RSP_EOPNOTSUPP result code.
 *
 * feature-flush-cache
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      A value of "1" indicates that the backend can process requests
 *      containing the BLKIF_OP_FLUSH_DISKCACHE request opcode.  Requests
 *      of this type may still be returned at any time with the
 *      BLKIF_RSP_EOPNOTSUPP result code.
 *
 * feature-discard
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      A value of "1" indicates that the backend can process requests
 *      containing the BLKIF_OP_DISCARD request opcode.  Requests
 *      of this type may still be returned at any time with the
 *      BLKIF_RSP_EOPNOTSUPP result code.
 *
 * feature-persistent
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes:          7
 *
 *      A value of "1" indicates that the backend can keep the grants used
 *      by the frontend driver mapped, so the same set of grants should be
 *      used in all transactions. The maximum number of grants the backend
 *      can map persistently depends on the implementation, but ideally it
 *      should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. With this
 *      feature the backend doesn't need to unmap each grant, which avoids
 *      costly TLB flushes. The backend driver should only map grants
 *      persistently if the frontend supports it. If a backend driver chooses
 *      to use the persistent protocol when the frontend doesn't support it,
 *      it will probably hit the maximum number of persistently mapped grants
 *      (due to the fact that the frontend won't be reusing the same grants),
 *      and fall back to non-persistent mode. Backend implementations may
 *      shrink or expand the number of persistently mapped grants without
 *      notifying the frontend depending on memory constraints (this might
 *      cause a performance degradation).
 *
 *      If a backend driver wants to limit the maximum number of persistently
 *      mapped grants to a value less than RING_SIZE *
 *      BLKIF_MAX_SEGMENTS_PER_REQUEST, an LRU strategy should be used to
 *      discard the grants that are less commonly used. Using an LRU in the
 *      backend driver paired with a LIFO queue in the frontend will
 *      give better performance in this scenario.
 *
 *----------------------- Request Transport Parameters ------------------------
 *
 * max-ring-page-order
 *      Values:         <UINT32>
 *      Default Value:  0
 *      Notes:          1, 3
 *
 *      The maximum supported size of the request ring buffer in units of
 *      lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
 *      etc.).
 *
 * max-ring-pages
 *      Values:         <UINT32>
 *      Default Value:  1
 *      Notes:          DEPRECATED, 2, 3
 *
 *      The maximum supported size of the request ring buffer in units of
 *      machine pages.  The value must be a power of 2.
 *
 *------------------------- Backend Device Properties -------------------------
 *
 * discard-alignment
 *      Values:         <UINT32>
 *      Default Value:  0
 *      Notes:          4, 5
 *
 *      The offset, in bytes from the beginning of the virtual block device,
 *      to the first addressable discard extent on the underlying device.
 *
 * discard-granularity
 *      Values:         <UINT32>
 *      Default Value:  <"sector-size">
 *      Notes:          4
 *
 *      The size, in bytes, of the individually addressable discard extents
 *      of the underlying device.
 *
 * discard-secure
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes:          10
 *
 *      A value of "1" indicates that the backend can process BLKIF_OP_DISCARD
 *      requests with the BLKIF_DISCARD_SECURE flag set.
 *
 * info
 *      Values:         <UINT32> (bitmap)
 *
 *      A collection of bit flags describing attributes of the backing
 *      device.  The VDISK_* macros define the meaning of each bit
 *      location.
 *
 * sector-size
 *      Values:         <UINT32>
 *
 *      The logical sector size, in bytes, of the backend device.
 *
 * physical-sector-size
 *      Values:         <UINT32>
 *
 *      The physical sector size, in bytes, of the backend device.
 *
 * sectors
 *      Values:         <UINT64>
 *
 *      The size of the backend device, expressed in units of its logical
 *      sector size ("sector-size").
 *
 *****************************************************************************
 *                            Frontend XenBus Nodes
 *****************************************************************************
 *
 *----------------------- Request Transport Parameters -----------------------
 *
 * event-channel
 *      Values:         <UINT32>
 *
 *      The identifier of the Xen event channel used to signal activity
 *      in the ring buffer.
 *
 * ring-ref
 *      Values:         <UINT32>
 *      Notes:          6
 *
 *      The Xen grant reference granting permission for the backend to map
 *      the sole page of a single-page ring buffer.
 *
 * ring-ref%u
 *      Values:         <UINT32>
 *      Notes:          6
 *
 *      For a frontend providing a multi-page ring, a "number of ring pages"
 *      sized list of nodes, each containing a Xen grant reference granting
 *      permission for the backend to map the page of the ring located
 *      at page index "%u".  Page indexes are zero based.
 *
 * protocol
 *      Values:         string (XEN_IO_PROTO_ABI_*)
 *      Default Value:  XEN_IO_PROTO_ABI_NATIVE
 *
 *      The machine ABI rules governing the format of all ring request and
 *      response structures.
 *
 * ring-page-order
 *      Values:         <UINT32>
 *      Default Value:  0
 *      Maximum Value:  MAX(ffs(max-ring-pages) - 1, max-ring-page-order)
 *      Notes:          1, 3
 *
 *      The size of the frontend allocated request ring buffer in units
 *      of lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
 *      etc.).  See the illustrative sizing sketch following this comment
 *      block.
 *
 * num-ring-pages
 *      Values:         <UINT32>
 *      Default Value:  1
 *      Maximum Value:  MAX(max-ring-pages,(0x1 << max-ring-page-order))
 *      Notes:          DEPRECATED, 2, 3
 *
 *      The size of the frontend allocated request ring buffer in units of
 *      machine pages.  The value must be a power of 2.
 *
 * feature-persistent
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes:          7, 8, 9
 *
 *      A value of "1" indicates that the frontend will reuse the same grants
 *      for all transactions, allowing the backend to map them with write
 *      access (even when it should be read-only). If the frontend hits the
 *      maximum number of allowed persistently mapped grants, it can fall back
 *      to non-persistent mode. This will cause a performance degradation,
 *      since the backend driver will still try to map those grants
 *      persistently. Since the persistent grants protocol is compatible with
 *      the previous protocol, a frontend driver can choose to work in
 *      persistent mode even when the backend doesn't support it.
 *
 *      It is recommended that the frontend driver store the persistently
 *      mapped grants in a LIFO queue, so that a subset of all persistently
 *      mapped grants is commonly reused. This is done in case the backend
 *      driver decides to limit the maximum number of persistently mapped
 *      grants to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 *
 *------------------------- Virtual Device Properties -------------------------
 *
 * device-type
 *      Values:         "disk", "cdrom", "floppy", etc.
 *
 * virtual-device
 *      Values:         <UINT32>
 *
 *      A value indicating the physical device to virtualize within the
 *      frontend's domain.  (e.g. "The first ATA disk", "The third SCSI
 *      disk", etc.)
 *
 *      See docs/misc/vbd-interface.txt for details on the format of this
 *      value.
 *
 * Notes
 * -----
 * (1) Multi-page ring buffer scheme first developed in the Citrix XenServer
 *     PV drivers.
 * (2) Multi-page ring buffer scheme first used in some RedHat distributions
 *     including a distribution deployed on certain nodes of the Amazon
 *     EC2 cluster.
 * (3) Support for multi-page ring buffers was implemented independently,
 *     in slightly different forms, by both Citrix and RedHat/Amazon.
 *     For full interoperability, block front and backends should publish
 *     identical ring parameters, adjusted for unit differences, to the
 *     XenStore nodes used in both schemes.
 * (4) Devices that support discard functionality may internally allocate space
 *     (discardable extents) in units that are larger than the exported logical
 *     block size. If the backing device has such discardable extents, the
 *     backend should provide both discard-granularity and discard-alignment.
 *     Providing just one of the two may be considered an error by the frontend.
 *     Backends supporting discard should include discard-granularity and
 *     discard-alignment even if they support discarding individual sectors.
 *     Frontends should assume discard-alignment == 0 and discard-granularity
 *     == sector size if these keys are missing.
 * (5) The discard-alignment parameter allows a physical device to be
 *     partitioned into virtual devices that do not necessarily begin or
 *     end on a discardable extent boundary.
 * (6) When there is only a single page allocated to the request ring,
 *     'ring-ref' is used to communicate the grant reference for this
 *     page to the backend.  When using a multi-page ring, the 'ring-ref'
 *     node is not created.  Instead 'ring-ref0' - 'ring-refN' are used.
 * (7) When using persistent grants, data has to be copied from/to the page
 *     where the grant is currently mapped. However, the overhead of this
 *     copy does not cancel out the speed improvement gained by not having
 *     to unmap the grants.
 * (8) The frontend driver has to allow the backend driver to map all grants
 *     with write access, even when they should be mapped read-only, since
 *     further requests may reuse these grants and require write permissions.
 * (9) The Linux implementation doesn't have a limit on the maximum number of
 *     grants that can be persistently mapped in the frontend driver, but
 *     due to the frontend driver implementation it should never be bigger
 *     than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 *(10) The discard-secure property may be present and will be set to 1 if the
 *     backing device supports secure discard.
 */
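
/*
 * Illustrative sketch (not part of this interface): how a frontend might
 * size its request ring from the transport parameters above.  The helpers
 * xs_read_uint32()/xs_write_uint32() are hypothetical (real XenStore APIs
 * differ per OS), and FRONTEND_PREFERRED_RING_ORDER is an assumed local
 * constant.
 *
 *     UINT32 max_order, order, nr_pages;
 *
 *     // Backend advertises its limit; an absent node means a single page.
 *     max_order = xs_read_uint32(backend_path, "max-ring-page-order", 0);
 *     order = (max_order < FRONTEND_PREFERRED_RING_ORDER) ?
 *             max_order : FRONTEND_PREFERRED_RING_ORDER;
 *     nr_pages = 1u << order;
 *
 *     // Publish both the modern and the deprecated nodes for
 *     // interoperability (see notes 1-3 above).
 *     xs_write_uint32(frontend_path, "ring-page-order", order);
 *     xs_write_uint32(frontend_path, "num-ring-pages", nr_pages);
 *
 *     // Then grant each ring page and publish "ring-ref%u" nodes, or the
 *     // single "ring-ref" node when order == 0 (see note 6 above).
 */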

/*
 * STATE DIAGRAMS
 *
 *****************************************************************************
 *                                   Startup                                 *
 *****************************************************************************
 *
 * Tool stack creates front and back nodes with state XenbusStateInitialising.
 *
 * Front                                Back
 * =================================    =====================================
 * XenbusStateInitialising              XenbusStateInitialising
 *  o Query virtual device               o Query backend device identification
 *    properties.                          data.
 *  o Setup OS device instance.          o Open and validate backend device.
 *                                       o Publish backend features and
 *                                         transport parameters.
 *                                                      |
 *                                                      |
 *                                                      V
 *                                      XenbusStateInitWait
 *
 * o Query backend features and
 *   transport parameters.
 * o Allocate and initialize the
 *   request ring.
 * o Publish transport parameters
 *   that will be in effect during
 *   this connection.
 *              |
 *              |
 *              V
 * XenbusStateInitialised
 *
 *                                       o Query frontend transport parameters.
 *                                       o Connect to the request ring and
 *                                         event channel.
 *                                       o Publish backend device properties.
 *                                                      |
 *                                                      |
 *                                                      V
 *                                      XenbusStateConnected
 *
 *  o Query backend device properties.
 *  o Finalize OS virtual device
 *    instance.
 *              |
 *              |
 *              V
 * XenbusStateConnected
 *
 * Note: Drivers that do not support any optional features, or the negotiation
 *       of transport parameters, can skip certain states in the state machine:
 *
 *       o A frontend may transition to XenbusStateInitialised without
 *         waiting for the backend to enter XenbusStateInitWait.  In this
 *         case, default transport parameters are in effect and any
 *         transport parameters published by the frontend must contain
 *         their default values.
 *
 *       o A backend may transition to XenbusStateInitialised, bypassing
 *         XenbusStateInitWait, without waiting for the frontend to first
 *         enter the XenbusStateInitialised state.  In this case, default
 *         transport parameters are in effect and any transport parameters
 *         published by the backend must contain their default values.
 *
 *       Drivers that support optional features and/or transport parameter
 *       negotiation must tolerate these additional state transition paths.
 *       In general this means performing the work of any skipped state
 *       transition, if it has not already been performed, in addition to the
 *       work associated with entry into the current state.
 */
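
/*
 * Illustrative sketch (not part of this interface): the shape of a frontend
 * handler for backend state changes, following the diagram above.  The
 * XenbusState* values come from the XenBus interface; every other name here
 * is a hypothetical helper that a real driver would hook into its OS's
 * XenBus watch infrastructure.
 *
 *     switch (backend_state) {
 *     case XenbusStateInitWait:
 *         // Backend has published its features and transport parameters.
 *         query_backend_features();
 *         setup_ring_and_event_channel();
 *         publish_frontend_transport_params();
 *         switch_frontend_state(XenbusStateInitialised);
 *         break;
 *     case XenbusStateConnected:
 *         // Backend has published its device properties.
 *         query_backend_device_properties();
 *         finalize_os_device_instance();
 *         switch_frontend_state(XenbusStateConnected);
 *         break;
 *     default:
 *         break;
 *     }
 */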

/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ              0
#define BLKIF_OP_WRITE             1
/*
 * All writes issued prior to a request with the BLKIF_OP_WRITE_BARRIER
 * operation code ("barrier request") must be completed prior to the
 * execution of the barrier request.  All writes issued after the barrier
 * request must not execute until after the completion of the barrier request.
 *
 * Optional.  See "feature-barrier" XenBus node documentation above.
 */
#define BLKIF_OP_WRITE_BARRIER     2
/*
 * Commit any uncommitted contents of the backing device's volatile cache
 * to stable storage.
 *
 * Optional.  See "feature-flush-cache" XenBus node documentation above.
 */
#define BLKIF_OP_FLUSH_DISKCACHE   3
/*
 * Used in SLES sources for a device-specific command packet
 * contained within the request. Reserved for that purpose.
 */
#define BLKIF_OP_RESERVED_1        4
/*
 * Indicate to the backend device that a region of storage is no longer in
 * use, and may be discarded at any time without impact to the client.  If
 * the BLKIF_DISCARD_SECURE flag is set on the request, all copies of the
 * discarded region on the device must be rendered unrecoverable before the
 * command returns.
 *
 * This operation is analogous to performing a trim (ATA) or unmap (SCSI)
 * command on a native device.
 *
 * More information about trim/unmap operations can be found at:
 * http://t13.org/Documents/UploadedDocuments/docs2008/
 *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
 * http://www.seagate.com/staticfiles/support/disc/manuals/
 *     Interface%20manuals/100293068c.pdf
 *
 * Optional.  See "feature-discard", "discard-alignment",
 * "discard-granularity", and "discard-secure" in the XenBus node
 * documentation above.
 */
#define BLKIF_OP_DISCARD           5

/*
 * Recognized if "feature-max-indirect-segments" is present in the backend
 * xenbus info. The "feature-max-indirect-segments" node contains the maximum
 * number of segments allowed by the backend per request. If the node is
 * present, the frontend might use blkif_request_indirect structs in order to
 * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
 * maximum number of indirect segments is fixed by the backend, but the
 * frontend can issue requests with any number of indirect segments as long as
 * it's less than the number provided by the backend. The indirect_grefs field
 * in blkif_request_indirect should be filled by the frontend with the
 * grant references of the pages that are holding the indirect segments.
 * These pages are filled with an array of blkif_request_segment structs that
 * hold the information about the segments. The number of indirect pages to
 * use is determined by the number of segments an indirect request contains.
 * Every indirect page can contain a maximum of
 * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
 * calculate the number of indirect pages to use we have to do
 * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
 *
 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
 * create the "feature-max-indirect-segments" node!
 */
#define BLKIF_OP_INDIRECT          6
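
/*
 * Illustrative sketch (not part of this interface): the ceil() expression
 * above written out in C.  SEGS_PER_INDIRECT_PAGE is an assumed local name,
 * PAGE_SIZE is whatever page size the guest uses for the indirect pages,
 * and nr_segments is the number of segments in the indirect request.
 *
 *     #define SEGS_PER_INDIRECT_PAGE \
 *         (PAGE_SIZE / sizeof(struct blkif_request_segment))
 *
 *     UINT32 nr_indirect_pages =
 *         (nr_segments + SEGS_PER_INDIRECT_PAGE - 1) / SEGS_PER_INDIRECT_PAGE;
 */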

/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

/*
 * Maximum number of indirect pages to use per request.
 */
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8

/*
 * NB. first_sect and last_sect in blkif_request_segment, as well as
 * sector_number in blkif_request, are always expressed in 512-byte units.
 * However they must be properly aligned to the real sector size of the
 * physical disk, which is reported in the "physical-sector-size" node in
 * the backend xenbus info. Also the xenbus "sectors" node is expressed in
 * 512-byte units.
 */
struct blkif_request_segment {
    grant_ref_t gref;        /* reference to I/O buffer frame        */
    /* @first_sect: first sector in frame to transfer (inclusive).   */
    /* @last_sect: last sector in frame to transfer (inclusive).     */
    UINT8     first_sect, last_sect;
};
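
/*
 * Illustrative example (not normative): a segment covering an entire 4 KiB
 * grant frame is expressed in 512-byte units as first_sect == 0 and
 * last_sect == 7 (both inclusive); "gref" stands for whatever grant
 * reference the frontend issued for that frame.
 *
 *     struct blkif_request_segment seg = {
 *         .gref       = gref,
 *         .first_sect = 0,
 *         .last_sect  = 7,    // 8 * 512 bytes == 4096 bytes
 *     };
 */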

/*
 * Starting ring element for any I/O request.
 */
#if defined(MDE_CPU_IA32)
//
// pack(4) is necessary when these structs are compiled for IA32.
// Without it, the struct will have a different alignment than the one
// a backend expects for a 32-bit guest.
//
#pragma pack(4)
#endif
struct blkif_request {
    UINT8        operation;    /* BLKIF_OP_???                         */
    UINT8        nr_segments;  /* number of segments                   */
    blkif_vdev_t   handle;       /* only for read/write requests         */
    UINT64       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_request blkif_request_t;
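
/*
 * Illustrative sketch (not part of this interface): a frontend filling a
 * single-segment BLKIF_OP_READ request and pushing it with the hold-off
 * notification check from ring.h.  The names ring (a blkif_front_ring_t,
 * generated below), vdev, gref, req_id, start_sector and evtchn are assumed
 * to have been set up by the frontend; notify_remote_via_evtchn() stands in
 * for whatever event-channel notification primitive the OS provides.
 *
 *     blkif_request_t *req;
 *     int notify;
 *
 *     req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
 *     req->operation     = BLKIF_OP_READ;
 *     req->nr_segments   = 1;
 *     req->handle        = vdev;
 *     req->id            = req_id;           // echoed back in the response
 *     req->sector_number = start_sector;     // 512-byte units
 *     req->seg[0].gref       = gref;
 *     req->seg[0].first_sect = 0;
 *     req->seg[0].last_sect  = 7;            // whole 4 KiB frame
 *     ring->req_prod_pvt++;
 *
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
 *     if (notify)
 *         notify_remote_via_evtchn(evtchn);
 */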

/*
 * Cast to this structure when blkif_request.operation == BLKIF_OP_DISCARD.
 * Note that sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request).
 */
struct blkif_request_discard {
    UINT8        operation;    /* BLKIF_OP_DISCARD                     */
    UINT8        flag;         /* BLKIF_DISCARD_SECURE or zero         */
#define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0      */
    blkif_vdev_t   handle;       /* same as for read/write requests      */
    UINT64       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk             */
    UINT64       nr_sectors;   /* number of contiguous sectors to discard */
};
typedef struct blkif_request_discard blkif_request_discard_t;
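
/*
 * Illustrative sketch (not part of this interface): filling a discard
 * request.  The flag should only be set when the backend advertises
 * "discard-secure" (see above); ring, vdev, req_id, start_sector,
 * nr_sectors and secure are assumed frontend-side values.
 *
 *     struct blkif_request_discard *dreq = (struct blkif_request_discard *)
 *         RING_GET_REQUEST(ring, ring->req_prod_pvt);
 *
 *     dreq->operation     = BLKIF_OP_DISCARD;
 *     dreq->flag          = secure ? BLKIF_DISCARD_SECURE : 0;
 *     dreq->handle        = vdev;
 *     dreq->id            = req_id;
 *     dreq->sector_number = start_sector;   // 512-byte units
 *     dreq->nr_sectors    = nr_sectors;
 */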

struct blkif_request_indirect {
    UINT8        operation;    /* BLKIF_OP_INDIRECT                    */
    UINT8        indirect_op;  /* BLKIF_OP_{READ/WRITE}                */
    UINT16       nr_segments;  /* number of segments                   */
    UINT64       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    blkif_vdev_t   handle;       /* same as for read/write requests      */
    grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifdef MDE_CPU_IA32
    UINT64       pad;          /* Make it 64-byte aligned on i386      */
#endif
};
typedef struct blkif_request_indirect blkif_request_indirect_t;
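
/*
 * Illustrative sketch (not part of this interface): filling an indirect
 * READ request whose segment descriptors have already been written into
 * separately granted indirect pages (see BLKIF_OP_INDIRECT above).  The
 * arrays indirect_gref[] and the values nr_indirect_pages, nr_segments,
 * vdev, req_id and start_sector are assumed to have been prepared by the
 * frontend.
 *
 *     blkif_request_indirect_t *ireq = (blkif_request_indirect_t *)
 *         RING_GET_REQUEST(ring, ring->req_prod_pvt);
 *     UINT32 i;
 *
 *     ireq->operation     = BLKIF_OP_INDIRECT;
 *     ireq->indirect_op   = BLKIF_OP_READ;
 *     ireq->nr_segments   = nr_segments;
 *     ireq->id            = req_id;
 *     ireq->sector_number = start_sector;   // 512-byte units
 *     ireq->handle        = vdev;
 *     for (i = 0; i < nr_indirect_pages; i++)
 *         ireq->indirect_grefs[i] = indirect_gref[i];
 */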

struct blkif_response {
    UINT64        id;              /* copied from request */
    UINT8         operation;       /* copied from request */
    INT16         status;          /* BLKIF_RSP_???       */
};
typedef struct blkif_response blkif_response_t;
#if defined(MDE_CPU_IA32)
#pragma pack()
#endif

/*
 * STATUS RETURN CODES.
 */
 /* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP  -2
 /* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
 /* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0

/*
 * Generate blkif ring structures and types.
 */
DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
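
/*
 * Illustrative sketch (not part of this interface): a frontend initialising
 * a single-page ring with the blkif_sring_t and blkif_front_ring_t types
 * generated above.  "shared_page" is assumed to be a page of guest memory
 * that is granted to the backend and advertised via "ring-ref", and
 * PAGE_SIZE is the size of that page.
 *
 *     blkif_sring_t      *sring = (blkif_sring_t *)shared_page;
 *     blkif_front_ring_t  ring;
 *
 *     SHARED_RING_INIT(sring);
 *     FRONT_RING_INIT(&ring, sring, PAGE_SIZE);
 */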

#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */