/* SPDX-License-Identifier: MIT */
/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 * Copyright (c) 2012, Spectra Logic Corporation
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include "ring.h"
#include "../grant_table.h"

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */
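
/*
 * Illustrative sketch (not part of the ABI): a backend request-consumption
 * loop using the hold-off mechanism described above.  blkif_back_ring_t is
 * generated by DEFINE_RING_TYPES() at the end of this header, the RING_*
 * macros come from ring.h, and process_request() is an assumed backend
 * helper.
 *
 *      static void backend_poll(blkif_back_ring_t *ring)
 *      {
 *          int more_to_do;
 *
 *          do {
 *              while (RING_HAS_UNCONSUMED_REQUESTS(ring)) {
 *                  blkif_request_t req;
 *
 *                  RING_COPY_REQUEST(ring, ring->req_cons, &req);
 *                  ring->req_cons++;
 *                  process_request(&req);
 *              }
 *              (Re-arms req_event and re-checks for requests that raced in,
 *               so a frontend notification is only needed when we go idle.)
 *              RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
 *          } while (more_to_do);
 *      }
 */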

#ifndef blkif_vdev_t
#define blkif_vdev_t   uint16_t
#endif
#define blkif_sector_t uint64_t

/*
 * Feature and Parameter Negotiation
 * =================================
 * The two halves of a Xen block driver utilize nodes within the XenStore to
 * communicate capabilities and to negotiate operating parameters.  This
 * section enumerates these nodes, which reside in the respective front and
 * backend portions of the XenStore, following the XenBus convention.
 *
 * All data in the XenStore is stored as strings.  Nodes specifying numeric
 * values are encoded in decimal.  Integer value ranges listed below are
 * expressed as fixed-size integer types capable of storing the conversion
 * of a properly formatted node string, without loss of information.
 *
 * Any specified default value is in effect if the corresponding XenBus node
 * is not present in the XenStore.
 *
 * XenStore nodes in sections marked "PRIVATE" are solely for use by the
 * driver side whose XenBus tree contains them.
 *
 * XenStore nodes marked "DEPRECATED" in their notes section should only be
 * used to provide interoperability with legacy implementations.
 *
 * See the XenBus state transition diagram below for details on when XenBus
 * nodes must be published and when they can be queried.
 *
 *****************************************************************************
 *                            Backend XenBus Nodes
 *****************************************************************************
 *
 *------------------ Backend Device Identification (PRIVATE) ------------------
 *
 * mode
 *      Values:         "r" (read only), "w" (writable)
 *
 *      The read or write access permissions to the backing store to be
 *      granted to the frontend.
 *
 * params
 *      Values:         string
 *
 *      A free-form string providing sufficient information for the
 *      hotplug script to attach the device and provide a suitable
 *      handler (i.e., a block device) for blkback to use.
 *
 * physical-device
 *      Values:         "MAJOR:MINOR"
 *      Notes: 11
 *
 *      MAJOR and MINOR are the major number and minor number of the
 *      backing device, respectively.
 *
 * physical-device-path
 *      Values:         path string
 *
 *      A string that contains the absolute path to the disk image. On
 *      NetBSD and Linux this is always a block device, while on FreeBSD
 *      it can be either a block device or a regular file.
 *
 * type
 *      Values:         "file", "phy", "tap"
 *
 *      The type of the backing device/object.
 *
 *
 * direct-io-safe
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      The underlying storage is not affected by the direct IO memory
 *      lifetime bug.  See:
 *        https://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
 *
 *      Therefore this option gives the backend permission to use
 *      O_DIRECT, notwithstanding that bug.
 *
 *      That is, if this option is enabled, use of O_DIRECT is safe,
 *      in circumstances where we would normally have avoided it as a
 *      workaround for that bug.  This option is not relevant for all
 *      backends, and not necessarily supported even for those for
 *      which it is relevant.  A backend which knows that it is not
 *      affected by the bug can ignore this option.
 *
 *      This option doesn't require a backend to use O_DIRECT, so it
 *      should not be used to try to control the caching behaviour.
 *
 *--------------------------------- Features ---------------------------------
 *
 * feature-barrier
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      A value of "1" indicates that the backend can process requests
 *      containing the BLKIF_OP_WRITE_BARRIER request opcode.  Requests
 *      of this type may still be returned at any time with the
 *      BLKIF_RSP_EOPNOTSUPP result code.
 *
 * feature-flush-cache
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      A value of "1" indicates that the backend can process requests
 *      containing the BLKIF_OP_FLUSH_DISKCACHE request opcode.  Requests
 *      of this type may still be returned at any time with the
 *      BLKIF_RSP_EOPNOTSUPP result code.
 *
 * feature-discard
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      A value of "1" indicates that the backend can process requests
 *      containing the BLKIF_OP_DISCARD request opcode.  Requests
 *      of this type may still be returned at any time with the
 *      BLKIF_RSP_EOPNOTSUPP result code.
 *
 * feature-persistent
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes: 7
 *
 *      A value of "1" indicates that the backend can keep the grants used
 *      by the frontend driver mapped, so the same set of grants should be
 *      used in all transactions. The maximum number of grants the backend
 *      can map persistently depends on the implementation, but ideally it
 *      should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. Using this
 *      feature the backend doesn't need to unmap each grant, preventing
 *      costly TLB flushes. The backend driver should only map grants
 *      persistently if the frontend supports it. If a backend driver chooses
 *      to use the persistent protocol when the frontend doesn't support it,
 *      it will probably hit the maximum number of persistently mapped grants
 *      (due to the fact that the frontend won't be reusing the same grants),
 *      and fall back to non-persistent mode. Backend implementations may
 *      shrink or expand the number of persistently mapped grants without
 *      notifying the frontend depending on memory constraints (this might
 *      cause a performance degradation).
 *
 *      If a backend driver wants to limit the maximum number of persistently
 *      mapped grants to a value less than RING_SIZE *
 *      BLKIF_MAX_SEGMENTS_PER_REQUEST, an LRU strategy should be used to
 *      discard the grants that are less commonly used. Using an LRU in the
 *      backend driver paired with a LIFO queue in the frontend will
 *      allow us to have better performance in this scenario (see the
 *      illustrative sketch below).
 *
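 *      A minimal illustrative sketch (not part of the protocol) of the
 *      recommended frontend LIFO pool of persistently granted references.
 *      The pool size and helper names are assumptions for illustration only;
 *      grants are pushed back when a response completes and popped for the
 *      next request, so a small "hot" subset is reused most of the time.
 *
 *          #define PGRANT_POOL_SIZE 352        (assumed: ring slots * segs)
 *
 *          static grant_ref_t pgrant_pool[PGRANT_POOL_SIZE];
 *          static unsigned int pgrant_top;     (number of pooled entries)
 *
 *          static int pgrant_get(grant_ref_t *gref)
 *          {
 *              if (pgrant_top == 0)
 *                  return -1;          (caller grants and adds a new page)
 *              *gref = pgrant_pool[--pgrant_top];
 *              return 0;
 *          }
 *
 *          static void pgrant_put(grant_ref_t gref)
 *          {
 *              if (pgrant_top < PGRANT_POOL_SIZE)
 *                  pgrant_pool[pgrant_top++] = gref;
 *          }
 *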
 *----------------------- Request Transport Parameters ------------------------
 *
 * max-ring-page-order
 *      Values:         <uint32_t>
 *      Default Value:  0
 *      Notes:          1, 3
 *
 *      The maximum supported size of the request ring buffer in units of
 *      lb(machine pages), i.e. the base-2 logarithm of the number of machine
 *      pages (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages, etc.).
 *
 * max-ring-pages
 *      Values:         <uint32_t>
 *      Default Value:  1
 *      Notes:          DEPRECATED, 2, 3
 *
 *      The maximum supported size of the request ring buffer in units of
 *      machine pages.  The value must be a power of 2.
 *
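 *      Illustrative conversion between the two encodings above (helper names
 *      are for illustration only; ffs() is the standard find-first-set):
 *
 *          unsigned int order_to_pages(unsigned int order)
 *          {
 *              return 1u << order;         (0 -> 1 page, 1 -> 2 pages, ...)
 *          }
 *
 *          unsigned int pages_to_order(unsigned int pages)
 *          {
 *              return ffs(pages) - 1;      (pages must be a power of 2)
 *          }
 *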
 *------------------------- Backend Device Properties -------------------------
 *
 * discard-enable
 *      Values:         0/1 (boolean)
 *      Default Value:  1
 *
 *      This optional property, set by the toolstack, instructs the backend
 *      to offer (or not to offer) discard to the frontend. If the property
 *      is missing, the backend should offer discard if the backing storage
 *      actually supports it.
 *
 * discard-alignment
 *      Values:         <uint32_t>
 *      Default Value:  0
 *      Notes:          4, 5
 *
 *      The offset, in bytes from the beginning of the virtual block device,
 *      to the first addressable discard extent on the underlying device.
 *
 * discard-granularity
 *      Values:         <uint32_t>
 *      Default Value:  <"sector-size">
 *      Notes:          4
 *
 *      The size, in bytes, of the individually addressable discard extents
 *      of the underlying device.
 *
 * discard-secure
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes:          10
 *
 *      A value of "1" indicates that the backend can process BLKIF_OP_DISCARD
 *      requests with the BLKIF_DISCARD_SECURE flag set.
 *
 * info
 *      Values:         <uint32_t> (bitmap)
 *
 *      A collection of bit flags describing attributes of the backing
 *      device.  The VDISK_* macros define the meaning of each bit
 *      location.
 *
 * sector-size
 *      Values:         <uint32_t>
 *
 *      The logical block size, in bytes, of the underlying storage. This must
 *      be a power of two with a minimum value of 512.  The sector size should
 *      only be used for request segment length and alignment.
 *
 *      When exposing a device that uses a logical sector size of 4096, the
 *      only difference in the XenStore is that 'sector-size' (and possibly
 *      'physical-sector-size' if supported by the backend) will be 4096,
 *      while the 'sectors' node is still calculated in 512-byte units.  The
 *      sector-based fields in ring requests likewise remain in 512-byte
 *      units, despite the logical sector size exposed in 'sector-size'.
 *      (See the worked example below.)
 *
 * physical-sector-size
 *      Values:         <uint32_t>
 *      Default Value:  <"sector-size">
 *
 *      The physical block size, in bytes, of the backend storage. This
 *      must be an integer multiple of "sector-size".
 *
 * sectors
 *      Values:         <uint64_t>
 *
 *      The size of the backend device, expressed in units of 512 bytes.  The
 *      product of "sectors" * 512 must also be an integer multiple of
 *      "physical-sector-size", if that node is present.
 *
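 *      Worked example (illustrative, not normative): a backend exposing a
 *      1 MiB disk with a 4096-byte logical sector size publishes
 *      sector-size == 4096 and sectors == 2048 (1048576 / 512).  A frontend
 *      addressing the logical block at byte offset 65536 still fills the
 *      sector-based ring fields in 512-byte units:
 *
 *          uint64_t byte_off = 65536;          (must be sector-size aligned,
 *                                               65536 % 4096 == 0)
 *          blkif_sector_t sector_number = byte_off / 512;      (== 128)
 *          unsigned int sects_per_block = 4096 / 512;          (== 8)
 *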
 *****************************************************************************
 *                            Frontend XenBus Nodes
 *****************************************************************************
 *
 *----------------------- Request Transport Parameters -----------------------
 *
 * event-channel
 *      Values:         <uint32_t>
 *
 *      The identifier of the Xen event channel used to signal activity
 *      in the ring buffer.
 *
 * ring-ref
 *      Values:         <uint32_t>
 *      Notes:          6
 *
 *      The Xen grant reference granting permission for the backend to map
 *      the sole page of a single-page ring buffer.
 *
 * ring-ref%u
 *      Values:         <uint32_t>
 *      Notes:          6
 *
 *      For a frontend providing a multi-page ring, a "number of ring pages"
 *      sized list of nodes, each containing a Xen grant reference granting
 *      permission for the backend to map the page of the ring located
 *      at page index "%u".  Page indexes are zero based.
 *
 * protocol
 *      Values:         string (XEN_IO_PROTO_ABI_*)
 *      Default Value:  XEN_IO_PROTO_ABI_NATIVE
 *
 *      The machine ABI rules governing the format of all ring request and
 *      response structures.
 *
 * ring-page-order
 *      Values:         <uint32_t>
 *      Default Value:  0
 *      Maximum Value:  MAX(ffs(max-ring-pages) - 1, max-ring-page-order)
 *      Notes:          1, 3
 *
 *      The size of the frontend-allocated request ring buffer in units of
 *      lb(machine pages), i.e. the base-2 logarithm of the number of machine
 *      pages (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages, etc.).  See the
 *      negotiation sketch below.
 *
 * num-ring-pages
 *      Values:         <uint32_t>
 *      Default Value:  1
 *      Maximum Value:  MAX(max-ring-pages,(0x1 << max-ring-page-order))
 *      Notes:          DEPRECATED, 2, 3
 *
 *      The size of the frontend-allocated request ring buffer in units of
 *      machine pages.  The value must be a power of 2.
 *
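 *      Negotiation sketch (illustrative, not normative): deriving the ring
 *      order a frontend publishes from the backend limits above.
 *      xs_read_uint() is a hypothetical XenStore read helper returning 0 on
 *      success, and FRONT_MAX_ORDER is the frontend's own limit.
 *
 *          unsigned int max_order = 0, max_pages, ring_order;
 *
 *          if (xs_read_uint(backend_path, "max-ring-page-order",
 *                           &max_order) != 0 &&
 *              xs_read_uint(backend_path, "max-ring-pages", &max_pages) == 0)
 *              max_order = ffs(max_pages) - 1;     (DEPRECATED scheme)
 *
 *          ring_order = max_order < FRONT_MAX_ORDER ? max_order
 *                                                   : FRONT_MAX_ORDER;
 *
 *      The frontend then publishes "ring-page-order" = ring_order and, for a
 *      multi-page ring, "ring-ref0" ... "ring-ref<(1 << ring_order) - 1>".
 *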
 *--------------------------------- Features ---------------------------------
 *
 * feature-persistent
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes: 7, 8, 9
 *
 *      A value of "1" indicates that the frontend will reuse the same grants
 *      for all transactions, allowing the backend to map them with write
 *      access (even when it should be read-only). If the frontend hits the
 *      maximum number of allowed persistently mapped grants, it can fall back
 *      to non-persistent mode. This will cause a performance degradation,
 *      since the backend driver will still try to map those grants
 *      persistently. Since the persistent grants protocol is compatible with
 *      the previous protocol, a frontend driver can choose to work in
 *      persistent mode even when the backend doesn't support it.
 *
 *      It is recommended that the frontend driver store the persistently
 *      mapped grants in a LIFO queue, so that a small subset of all
 *      persistently mapped grants is used for most requests. This helps in
 *      case the backend driver decides to limit the maximum number of
 *      persistently mapped grants to a value less than RING_SIZE *
 *      BLKIF_MAX_SEGMENTS_PER_REQUEST.
 *
 * feature-large-sector-size
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes:          DEPRECATED, 12
 *
 *      A value of "1" indicates that the frontend will correctly supply and
 *      interpret all sector-based quantities in terms of the "sector-size"
 *      value supplied in the backend info, whatever that may be set to.
 *      If this node is not present or its value is "0", it is assumed that
 *      the frontend requires the logical block size to be 512, as it is
 *      hardcoded in some frontend implementations.
 *
 * trusted
 *      Values:         0/1 (boolean)
 *      Default value:  1
 *
 *      A value of "0" indicates that the frontend should not trust the
 *      backend, and should deploy whatever measures are available to protect
 *      itself from a malicious backend on the other end.
 *
 *------------------------- Virtual Device Properties -------------------------
 *
 * device-type
 *      Values:         "disk", "cdrom", "floppy", etc.
 *
 * virtual-device
 *      Values:         <uint32_t>
 *
 *      A value indicating the physical device to virtualize within the
 *      frontend's domain.  (e.g. "The first ATA disk", "The third SCSI
 *      disk", etc.)
 *
 *      See docs/misc/vbd-interface.txt for details on the format of this
 *      value.
 *
 * Notes
 * -----
 * (1) Multi-page ring buffer scheme first developed in the Citrix XenServer
 *     PV drivers.
 * (2) Multi-page ring buffer scheme first used in some RedHat distributions
 *     including a distribution deployed on certain nodes of the Amazon
 *     EC2 cluster.
 * (3) Support for multi-page ring buffers was implemented independently,
 *     in slightly different forms, by both Citrix and RedHat/Amazon.
 *     For full interoperability, block front and backends should publish
 *     identical ring parameters, adjusted for unit differences, to the
 *     XenStore nodes used in both schemes.
 * (4) Devices that support discard functionality may internally allocate space
 *     (discardable extents) in units that are larger than the exported logical
 *     block size. If the backing device has such discardable extents the
 *     backend should provide both discard-granularity and discard-alignment.
 *     Providing just one of the two may be considered an error by the frontend.
 *     Backends supporting discard should include discard-granularity and
 *     discard-alignment even if they support discarding individual sectors.
 *     Frontends should assume discard-alignment == 0 and discard-granularity
 *     == sector size if these keys are missing.
 * (5) The discard-alignment parameter allows a physical device to be
 *     partitioned into virtual devices that do not necessarily begin or
 *     end on a discardable extent boundary.
 * (6) When there is only a single page allocated to the request ring,
 *     'ring-ref' is used to communicate the grant reference for this
 *     page to the backend.  When using a multi-page ring, the 'ring-ref'
 *     node is not created.  Instead 'ring-ref0' - 'ring-refN' are used.
 * (7) When using persistent grants, data has to be copied from/to the page
 *     where the grant is currently mapped. However, the overhead of doing
 *     this copy does not outweigh the speed improvement of not having to
 *     unmap the grants.
 * (8) The frontend driver has to allow the backend driver to map all grants
 *     with write access, even when they should be mapped read-only, since
 *     further requests may reuse these grants and require write permissions.
 * (9) The Linux implementation doesn't have a limit on the maximum number of
 *     grants that can be persistently mapped in the frontend driver, but
 *     due to the frontend driver implementation it should never be bigger
 *     than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 *(10) The discard-secure property may be present and will be set to 1 if the
 *     backing device supports secure discard.
 *(11) Only used by Linux and NetBSD.
 *(12) Possibly only ever implemented by the QEMU Qdisk backend and the Windows
 *     PV block frontend.  Other backends and frontends supported 'sector-size'
 *     values greater than 512 before this feature was added.  Frontends should
 *     not expose this node, nor should backends make any decisions based
 *     on it being exposed by the frontend.
 */

/*
 * Multiple hardware queues/rings:
 * If supported, the backend will write the key "multi-queue-max-queues" to
 * the directory for that vbd, and set its value to the maximum supported
 * number of queues.
 * Frontends that are aware of this feature and wish to use it can write the
 * key "multi-queue-num-queues" with the number they wish to use, which must be
 * greater than zero, and no more than the value reported by the backend in
 * "multi-queue-max-queues".
 *
 * For frontends requesting just one queue, the usual event-channel and
 * ring-ref keys are written as before, simplifying the backend processing
 * to avoid distinguishing between a frontend that doesn't understand the
 * multi-queue feature, and one that does, but requested only one queue.
 *
 * Frontends requesting two or more queues must not write the toplevel
 * event-channel and ring-ref keys, instead writing those keys under sub-keys
 * having the name "queue-N", where N is the integer ID of the queue/ring to
 * which those keys belong. Queues are indexed from zero.
 * For example, a frontend with two queues must write the following set of
 * queue-related keys:
 *
 * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
 * /local/domain/1/device/vbd/0/queue-0 = ""
 * /local/domain/1/device/vbd/0/queue-0/ring-ref = "<ring-ref#0>"
 * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
 * /local/domain/1/device/vbd/0/queue-1 = ""
 * /local/domain/1/device/vbd/0/queue-1/ring-ref = "<ring-ref#1>"
 * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
 *
 * It is also possible to use multiple queues/rings together with the
 * multi-page ring buffer feature.
 * For example, a frontend requesting two queues/rings, each with a ring
 * buffer of two pages, must write the following set of related keys:
 *
 * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
 * /local/domain/1/device/vbd/0/ring-page-order = "1"
 * /local/domain/1/device/vbd/0/queue-0 = ""
 * /local/domain/1/device/vbd/0/queue-0/ring-ref0 = "<ring-ref#0>"
 * /local/domain/1/device/vbd/0/queue-0/ring-ref1 = "<ring-ref#1>"
 * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
 * /local/domain/1/device/vbd/0/queue-1 = ""
 * /local/domain/1/device/vbd/0/queue-1/ring-ref0 = "<ring-ref#2>"
 * /local/domain/1/device/vbd/0/queue-1/ring-ref1 = "<ring-ref#3>"
 * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
 *
 */
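
/*
 * Illustrative sketch (not normative): a frontend publishing the per-queue
 * keys shown above.  xs_write_str() is a hypothetical XenStore write helper,
 * and front_path, ring_ref[] and evtchn[] are assumed to have been set up
 * already; only the key layout mirrors the protocol.
 *
 *      char key[64], val[32];
 *      unsigned int q, nr_queues = 2;
 *
 *      snprintf(val, sizeof(val), "%u", nr_queues);
 *      xs_write_str(front_path, "multi-queue-num-queues", val);
 *      for (q = 0; q < nr_queues; q++) {
 *          snprintf(key, sizeof(key), "queue-%u/ring-ref", q);
 *          snprintf(val, sizeof(val), "%u", ring_ref[q]);
 *          xs_write_str(front_path, key, val);
 *          snprintf(key, sizeof(key), "queue-%u/event-channel", q);
 *          snprintf(val, sizeof(val), "%u", evtchn[q]);
 *          xs_write_str(front_path, key, val);
 *      }
 */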

/*
 * STATE DIAGRAMS
 *
 *****************************************************************************
 *                                   Startup                                 *
 *****************************************************************************
 *
 * Tool stack creates front and back nodes with state XenbusStateInitialising.
 *
 * Front                                Back
 * =================================    =====================================
 * XenbusStateInitialising              XenbusStateInitialising
 *  o Query virtual device               o Query backend device identification
 *    properties.                          data.
 *  o Setup OS device instance.          o Open and validate backend device.
 *                                       o Publish backend features and
 *                                         transport parameters.
 *                                                      |
 *                                                      |
 *                                                      V
 *                                      XenbusStateInitWait
 *
 * o Query backend features and
 *   transport parameters.
 * o Allocate and initialize the
 *   request ring.
 * o Publish transport parameters
 *   that will be in effect during
 *   this connection.
 *              |
 *              |
 *              V
 * XenbusStateInitialised
 *
 *                                       o Query frontend transport parameters.
 *                                       o Connect to the request ring and
 *                                         event channel.
 *                                       o Publish backend device properties.
 *                                                      |
 *                                                      |
 *                                                      V
 *                                      XenbusStateConnected
 *
 *  o Query backend device properties.
 *  o Finalize OS virtual device
 *    instance.
 *              |
 *              |
 *              V
 * XenbusStateConnected
 *
 * Note: Drivers that do not support any optional features, or the negotiation
 *       of transport parameters, can skip certain states in the state machine:
 *
 *       o A frontend may transition to XenbusStateInitialised without
 *         waiting for the backend to enter XenbusStateInitWait.  In this
 *         case, default transport parameters are in effect and any
 *         transport parameters published by the frontend must contain
 *         their default values.
 *
 *       o A backend may transition to XenbusStateInitialised, bypassing
 *         XenbusStateInitWait, without waiting for the frontend to first
 *         enter the XenbusStateInitialised state.  In this case, default
 *         transport parameters are in effect and any transport parameters
 *         published by the backend must contain their default values.
 *
 *       Drivers that support optional features and/or transport parameter
 *       negotiation must tolerate these additional state transition paths.
 *       In general this means performing the work of any skipped state
 *       transition, if it has not already been performed, in addition to the
 *       work associated with entry into the current state.
 */

/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ              0
#define BLKIF_OP_WRITE             1
/*
 * All writes issued prior to a request with the BLKIF_OP_WRITE_BARRIER
 * operation code ("barrier request") must be completed prior to the
 * execution of the barrier request.  All writes issued after the barrier
 * request must not execute until after the completion of the barrier request.
 *
 * Optional.  See "feature-barrier" XenBus node documentation above.
 */
#define BLKIF_OP_WRITE_BARRIER     2
/*
 * Commit any uncommitted contents of the backing device's volatile cache
 * to stable storage.
 *
 * Optional.  See "feature-flush-cache" XenBus node documentation above.
 */
#define BLKIF_OP_FLUSH_DISKCACHE   3
/*
 * Used in SLES sources for a device-specific command packet
 * contained within the request. Reserved for that purpose.
 */
#define BLKIF_OP_RESERVED_1        4
/*
 * Indicate to the backend device that a region of storage is no longer in
 * use, and may be discarded at any time without impact to the client.  If
 * the BLKIF_DISCARD_SECURE flag is set on the request, all copies of the
 * discarded region on the device must be rendered unrecoverable before the
 * command returns.
 *
 * This operation is analogous to performing a trim (ATA) or unmap (SCSI)
 * command on a native device.
 *
 * More information about trim/unmap operations can be found at:
 * http://t13.org/Documents/UploadedDocuments/docs2008/
 *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
 * http://www.seagate.com/staticfiles/support/disc/manuals/
 *     Interface%20manuals/100293068c.pdf
 *
 * Optional.  See "feature-discard", "discard-alignment",
 * "discard-granularity", and "discard-secure" in the XenBus node
 * documentation above.
 */
#define BLKIF_OP_DISCARD           5

/*
 * Recognized if "feature-max-indirect-segments" is present in the backend
 * xenbus info. The "feature-max-indirect-segments" node contains the maximum
 * number of segments allowed by the backend per request. If the node is
 * present, the frontend might use blkif_request_indirect structs in order to
 * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
 * maximum number of indirect segments is fixed by the backend, but the
 * frontend can issue requests with any number of indirect segments as long as
 * it's less than the number provided by the backend. The indirect_grefs field
 * in blkif_request_indirect should be filled by the frontend with the
 * grant references of the pages that are holding the indirect segments.
 * These pages are filled with an array of blkif_request_segment that hold the
 * information about the segments. The number of indirect pages to use is
 * determined by the number of segments an indirect request contains. Every
 * indirect page can contain a maximum of
 * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
 * calculate the number of indirect pages to use we have to do
 * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))),
 * as shown in the sketch below.
 *
 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
 * create the "feature-max-indirect-segments" node!
 */
#define BLKIF_OP_INDIRECT          6
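
/*
 * Illustrative sketch (not part of the ABI): computing the number of
 * indirect pages needed for a given segment count.  PAGE_SIZE is provided
 * by the embedding environment, not by this header; the macro names are
 * assumptions of the example.
 *
 *      #define SEGS_PER_INDIRECT_PAGE \
 *          (PAGE_SIZE / sizeof(struct blkif_request_segment))
 *
 *      #define INDIRECT_PAGES(segs) \
 *          (((segs) + SEGS_PER_INDIRECT_PAGE - 1) / SEGS_PER_INDIRECT_PAGE)
 *
 * With a 4096-byte PAGE_SIZE and the usual 8-byte segment layout this gives
 * 512 segments per indirect page, so e.g. INDIRECT_PAGES(600) == 2.
 */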

/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

/*
 * Maximum number of indirect pages to use per request.
 */
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8

/*
 * NB. 'first_sect' and 'last_sect' in blkif_request_segment are in units
 * of 512 bytes, despite the 'sector-size' xenstore node possibly having a
 * value greater than 512.
 *
 * The values in the 'first_sect' and 'last_sect' fields must be set up so
 * that the resulting segment offset and size are aligned to the logical
 * sector size reported by the 'sector-size' xenstore node; see the 'Backend
 * Device Properties' section.
 */
struct blkif_request_segment {
    grant_ref_t gref;        /* reference to I/O buffer frame        */
    /* @first_sect: first sector in frame to transfer (inclusive).   */
    /* @last_sect: last sector in frame to transfer (inclusive).     */
    uint8_t     first_sect, last_sect;
};
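
/*
 * Worked example (illustrative): a segment covering a full 4 KiB page
 * granted by 'gref'.  The sector fields are in 512-byte units and
 * 'last_sect' is inclusive, so the whole page spans sectors 0 through 7;
 * with a 4096-byte 'sector-size' this is also the smallest segment whose
 * offset and length stay aligned to the logical sector size.
 *
 *      struct blkif_request_segment seg = {
 *          .gref       = gref,     (page already granted to the backend)
 *          .first_sect = 0,
 *          .last_sect  = 7,        (8 * 512 bytes == 4096 bytes)
 *      };
 */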

/*
 * Starting ring element for any I/O request.
 *
 * The 'sector_number' field is in units of 512b, despite the value of the
 * 'sector-size' xenstore node.  Note however that the offset in
 * 'sector_number' must be aligned to 'sector-size'.
 */
struct blkif_request {
    uint8_t        operation;    /* BLKIF_OP_???                         */
    uint8_t        nr_segments;  /* number of segments                   */
    blkif_vdev_t   handle;       /* only for read/write requests         */
    uint64_t       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_request blkif_request_t;
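
/*
 * Illustrative sketch (not normative): a single-segment BLKIF_OP_READ of the
 * 4 KiB at byte offset 65536 of the device.  'vdev', 'gref' and the id value
 * are assumptions of the example; the id is simply echoed back in the
 * response so the frontend can match completions to requests.
 *
 *      blkif_request_t req = {
 *          .operation     = BLKIF_OP_READ,
 *          .nr_segments   = 1,
 *          .handle        = vdev,
 *          .id            = 0x1234,
 *          .sector_number = 65536 / 512,   (always 512-byte units)
 *      };
 *
 *      req.seg[0] = (struct blkif_request_segment){
 *          .gref = gref, .first_sect = 0, .last_sect = 7,
 *      };
 */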

/*
 * Cast to this structure when blkif_request.operation == BLKIF_OP_DISCARD.
 * sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request)
 *
 * The 'sector_number' field is in units of 512b, despite the value of the
 * 'sector-size' xenstore node.  Note however that the offset in
 * 'sector_number' must be aligned to 'discard-granularity'.
 */
struct blkif_request_discard {
    uint8_t        operation;    /* BLKIF_OP_DISCARD                     */
    uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
#define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0      */
    blkif_vdev_t   handle;       /* same as for read/write requests      */
    uint64_t       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk             */
    uint64_t       nr_sectors;   /* number of contiguous sectors to discard */
};
typedef struct blkif_request_discard blkif_request_discard_t;

/*
 * The 'sector_number' field is in units of 512b, despite the value of the
 * 'sector-size' xenstore node.  Note however that the offset in
 * 'sector_number' must be aligned to 'sector-size'.
 */
struct blkif_request_indirect {
    uint8_t        operation;    /* BLKIF_OP_INDIRECT                    */
    uint8_t        indirect_op;  /* BLKIF_OP_{READ/WRITE}                */
    uint16_t       nr_segments;  /* number of segments                   */
    uint64_t       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    blkif_vdev_t   handle;       /* same as for read/write requests      */
    grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifdef __i386__
    uint64_t       pad;          /* Make it 64 byte aligned on i386      */
#endif
};
typedef struct blkif_request_indirect blkif_request_indirect_t;
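
/*
 * Illustrative sketch (not normative): an indirect read carrying 'nsegs'
 * segments, where nsegs > BLKIF_MAX_SEGMENTS_PER_REQUEST.  The segment
 * descriptors live in separately granted pages rather than in the request
 * itself; INDIRECT_PAGES() is the page-count calculation sketched after
 * BLKIF_OP_INDIRECT above, and indirect_gref[] refers to pre-granted pages
 * already filled with arrays of struct blkif_request_segment.
 *
 *      blkif_request_indirect_t ind = {
 *          .operation     = BLKIF_OP_INDIRECT,
 *          .indirect_op   = BLKIF_OP_READ,
 *          .nr_segments   = nsegs,
 *          .id            = 0x1235,
 *          .sector_number = start_sector,  (512-byte units)
 *          .handle        = vdev,
 *      };
 *      for (i = 0; i < INDIRECT_PAGES(nsegs); i++)
 *          ind.indirect_grefs[i] = indirect_gref[i];
 */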

struct blkif_response {
    uint64_t        id;              /* copied from request */
    uint8_t         operation;       /* copied from request */
    int16_t         status;          /* BLKIF_RSP_???       */
};
typedef struct blkif_response blkif_response_t;

/*
 * STATUS RETURN CODES.
 */
 /* Operation not supported (e.g. a barrier, flush, or discard request). */
#define BLKIF_RSP_EOPNOTSUPP  -2
 /* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
 /* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0

/*
 * Generate blkif ring structures and types.
 */
DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
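
/*
 * Illustrative sketch (not part of the ABI): a frontend producing a request
 * and reaping responses with the types generated above and the macros from
 * ring.h.  'front' is a blkif_front_ring_t initialised with FRONT_RING_INIT();
 * notify_backend() and complete_request() stand in for OS-specific event
 * channel and completion handling.  A real frontend also checks RING_FULL()
 * before producing.
 *
 *      static void submit(blkif_front_ring_t *front, const blkif_request_t *req)
 *      {
 *          int notify;
 *
 *          *RING_GET_REQUEST(front, front->req_prod_pvt) = *req;
 *          front->req_prod_pvt++;
 *          RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(front, notify);
 *          if (notify)
 *              notify_backend();
 *      }
 *
 *      static void reap(blkif_front_ring_t *front)
 *      {
 *          int more_to_do;
 *
 *          do {
 *              while (RING_HAS_UNCONSUMED_RESPONSES(front)) {
 *                  blkif_response_t rsp =
 *                      *RING_GET_RESPONSE(front, front->rsp_cons);
 *
 *                  front->rsp_cons++;
 *                  complete_request(rsp.id, rsp.status);
 *              }
 *              RING_FINAL_CHECK_FOR_RESPONSES(front, more_to_do);
 *          } while (more_to_do);
 *      }
 */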

#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */