File: /Users/paulross/dev/linux/linux-3.13/include/linux/blk_types.h

/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#ifdef CONFIG_BLOCK

#include <linux/types.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
    struct page    *bv_page;
    unsigned int    bv_len;
    unsigned int    bv_offset;
};
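
A bio_vec describes one contiguous chunk of payload inside a single page:
bv_len bytes starting bv_offset bytes into bv_page. As a minimal sketch of
filling one in (the helper name is hypothetical, not part of this header):

/* Sketch: point one bio_vec at the first 512 bytes of a page.
 * my_fill_vec() is a hypothetical helper for illustration only. */
static void my_fill_vec(struct bio_vec *bv, struct page *page)
{
    bv->bv_page   = page;    /* page holding the data */
    bv->bv_offset = 0;       /* byte offset within that page */
    bv->bv_len    = 512;     /* one 512-byte sector's worth */
}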

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
    sector_t        bi_sector;    /* device address in 512 byte
                           sectors */
    struct bio        *bi_next;    /* request queue link */
    struct block_device    *bi_bdev;
    unsigned long        bi_flags;    /* status, command, etc */
    unsigned long        bi_rw;        /* bottom bits READ/WRITE,
                         * top bits priority
                         */

    unsigned short        bi_vcnt;    /* how many bio_vec's */
    unsigned short        bi_idx;        /* current index into bvl_vec */

    /* Number of segments in this BIO after
     * physical address coalescing is performed.
     */
    unsigned int        bi_phys_segments;

    unsigned int        bi_size;    /* residual I/O count */

    /*
     * To keep track of the max segment size, we account for the
     * sizes of the first and last mergeable segments in this bio.
     */
    unsigned int        bi_seg_front_size;
    unsigned int        bi_seg_back_size;

    bio_end_io_t        *bi_end_io;

    void            *bi_private;
#ifdef CONFIG_BLK_CGROUP
    /*
     * Optional ioc and css associated with this bio.  Put on bio
     * release.  Read comment on top of bio_associate_current().
     */
    struct io_context    *bi_ioc;
    struct cgroup_subsys_state *bi_css;
#endif
#if defined(CONFIG_BLK_DEV_INTEGRITY)
    struct bio_integrity_payload *bi_integrity;  /* data integrity */
#endif

    /*
     * Everything starting with bi_max_vecs will be preserved by bio_reset()
     */

    unsigned int        bi_max_vecs;    /* max bvl_vecs we can hold */

    atomic_t        bi_cnt;        /* pin count */

    struct bio_vec        *bi_io_vec;    /* the actual vec list */

    struct bio_set        *bi_pool;

    /*
     * We can inline a number of vecs at the end of the bio, to avoid
     * double allocations for a small number of bio_vecs. This member
     * MUST obviously be kept at the very end of the bio.
     */
    struct bio_vec        bi_inline_vecs[0];
};
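
The payload of a bio is the bi_vcnt entries of bi_io_vec, and bi_idx marks
the first segment not yet completed. A minimal sketch of walking those
segments with the bio_for_each_segment() iterator from <linux/bio.h> in
this kernel series (the function name is hypothetical):

/* Sketch: sum the remaining payload of a bio.  In the 3.13 series,
 * bio_for_each_segment() visits each bio_vec from bi_idx to bi_vcnt.
 * my_remaining_bytes() is a hypothetical helper. */
static unsigned int my_remaining_bytes(struct bio *bio)
{
    struct bio_vec *bvec;
    unsigned int bytes = 0;
    int i;

    bio_for_each_segment(bvec, bio, i)
        bytes += bvec->bv_len;

    return bytes;    /* equals bi_size while nothing has completed */
}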

#define BIO_RESET_BYTES        offsetof(struct bio, bi_max_vecs)
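
BIO_RESET_BYTES relies on the field ordering above: everything before
bi_max_vecs is per-I/O state, everything from bi_max_vecs onward describes
the allocation itself. bio_reset() in fs/bio.c uses it roughly as below
(a simplified sketch; the real function also releases integrity and
ownership state first):

/* Simplified sketch of the idea behind bio_reset(): wipe only the
 * per-I/O fields, preserving bi_max_vecs and everything after it,
 * plus the flag bits at and above BIO_RESET_BITS (defined below). */
static void my_bio_reset(struct bio *bio)    /* hypothetical name */
{
    unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

    memset(bio, 0, BIO_RESET_BYTES);
    bio->bi_flags = flags | (1 << BIO_UPTODATE);
}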

/*
 * bio flags
 */
#define BIO_UPTODATE    0    /* ok after I/O completion */
#define BIO_RW_BLOCK    1    /* RW_AHEAD set, and read/write would block */
#define BIO_EOF        2    /* out-of-bounds error */
#define BIO_SEG_VALID    3    /* bi_phys_segments valid */
#define BIO_CLONED    4    /* doesn't own data */
#define BIO_BOUNCED    5    /* bio is a bounce bio */
#define BIO_USER_MAPPED 6    /* contains user pages */
#define BIO_EOPNOTSUPP    7    /* not supported */
#define BIO_NULL_MAPPED 8    /* contains invalid user pages */
#define BIO_FS_INTEGRITY 9    /* fs owns integrity data, not block layer */
#define BIO_QUIET    10    /* Make BIO Quiet */
#define BIO_MAPPED_INTEGRITY 11    /* integrity metadata has been remapped */
#define BIO_SNAP_STABLE    12    /* bio data must be snapshotted during write */

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BIO_POOL_IDX()
 */
#define BIO_RESET_BITS    13
#define BIO_OWNS_VEC    13    /* bio_free() should free bvec */

#define bio_flagged(bio, flag)    ((bio)->bi_flags & (1 << (flag)))
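
The BIO_* values are bit numbers within bi_flags, so they are tested with
bio_flagged() and set with set_bit() or a shift-and-OR. A typical consumer
is a completion callback matching the bio_end_io_t typedef above; a
minimal sketch (the callback name is hypothetical):

/* Sketch of a bio_end_io_t callback: check BIO_UPTODATE through
 * bio_flagged() before trusting the data.  my_end_io() is a
 * hypothetical name; bio_put() comes from <linux/bio.h>. */
static void my_end_io(struct bio *bio, int error)
{
    if (error || !bio_flagged(bio, BIO_UPTODATE))
        pr_err("my_end_io: I/O failed (%d)\n", error);

    bio_put(bio);    /* drop the reference counted in bi_cnt */
}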

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS        (4)
#define BIO_POOL_NONE        ((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET        (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK        (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)    ((bio)->bi_flags >> BIO_POOL_OFFSET)
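
So bi_flags is split in two: the low bits hold the BIO_* status bits, and
the top BIO_POOL_BITS record which bvec pool the vec list came from, with
BIO_POOL_NONE (all ones) meaning no pool. A sketch of stamping and reading
the index (the helper is hypothetical; the real stamping happens in the
allocation paths in fs/bio.c):

/* Sketch: store a pool index in the top bits of bi_flags and read it
 * back with BIO_POOL_IDX().  my_set_pool_idx() is hypothetical. */
static void my_set_pool_idx(struct bio *bio, unsigned long idx)
{
    bio->bi_flags &= ~(BIO_POOL_NONE << BIO_POOL_OFFSET);  /* clear old */
    bio->bi_flags |= idx << BIO_POOL_OFFSET;               /* stamp new */
    /* BIO_POOL_IDX(bio) now evaluates to idx */
}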

#endif /* CONFIG_BLOCK */

/*
 * Request flags.  For use in the cmd_flags field of struct request, and in
 * bi_rw of struct bio.  Note that some flags are only valid in either one.
 */
enum rq_flag_bits {
    /* common flags */
    __REQ_WRITE,        /* not set, read. set, write */
    __REQ_FAILFAST_DEV,    /* no driver retries of device errors */
    __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
    __REQ_FAILFAST_DRIVER,    /* no driver retries of driver errors */

    __REQ_SYNC,        /* request is sync (sync write or read) */
    __REQ_META,        /* metadata io request */
    __REQ_PRIO,        /* boost priority in cfq */
    __REQ_DISCARD,        /* request to discard sectors */
    __REQ_SECURE,        /* secure discard (used with __REQ_DISCARD) */
    __REQ_WRITE_SAME,    /* write same block many times */

    __REQ_NOIDLE,        /* don't anticipate more IO after this one */
    __REQ_FUA,        /* forced unit access */
    __REQ_FLUSH,        /* request for cache flush */

    /* bio only flags */
    __REQ_RAHEAD,        /* read ahead, can fail anytime */
    __REQ_THROTTLED,    /* This bio has already been subjected to
                 * throttling rules. Don't do it again. */

    /* request only flags */
    __REQ_SORTED,        /* elevator knows about this request */
    __REQ_SOFTBARRIER,    /* may not be passed by ioscheduler */
    __REQ_NOMERGE,        /* don't touch this for merging */
    __REQ_STARTED,        /* drive already may have started this one */
    __REQ_DONTPREP,        /* don't call prep for this one */
    __REQ_QUEUED,        /* uses queueing */
    __REQ_ELVPRIV,        /* elevator private data attached */
    __REQ_FAILED,        /* set if the request failed */
    __REQ_QUIET,        /* don't worry about errors */
    __REQ_PREEMPT,        /* set for "ide_preempt" requests */
    __REQ_ALLOCED,        /* request came from our alloc pool */
    __REQ_COPY_USER,    /* contains copies of user pages */
    __REQ_FLUSH_SEQ,    /* request for flush sequence */
    __REQ_IO_STAT,        /* account I/O stat */
    __REQ_MIXED_MERGE,    /* merge of different types, fail separately */
    __REQ_KERNEL,        /* direct IO to kernel pages */
    __REQ_PM,        /* runtime pm request */
    __REQ_END,        /* last of chain of requests */
    __REQ_NR_BITS,        /* stops here */
};

#define REQ_WRITE        (1ULL << __REQ_WRITE)
#define REQ_FAILFAST_DEV    (1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT    (1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER    (1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC        (1ULL << __REQ_SYNC)
#define REQ_META        (1ULL << __REQ_META)
#define REQ_PRIO        (1ULL << __REQ_PRIO)
#define REQ_DISCARD        (1ULL << __REQ_DISCARD)
#define REQ_WRITE_SAME        (1ULL << __REQ_WRITE_SAME)
#define REQ_NOIDLE        (1ULL << __REQ_NOIDLE)

#define REQ_FAILFAST_MASK \
    (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
    (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
     REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
     REQ_SECURE)
#define REQ_CLONE_MASK        REQ_COMMON_MASK

#define BIO_NO_ADVANCE_ITER_MASK    (REQ_DISCARD|REQ_WRITE_SAME)

/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
    (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
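
If any REQ_NOMERGE_FLAGS bit is set on either the request or the incoming
bio, the pair must not be merged. A minimal sketch of the shape of that
test (the helper name is hypothetical; the real checks live in
block/blk-merge.c and the I/O schedulers):

/* Sketch: the no-merge test as applied to two flag words.
 * my_may_merge() is a hypothetical name for illustration. */
static bool my_may_merge(u64 rq_cmd_flags, unsigned long bio_rw)
{
    if ((rq_cmd_flags | bio_rw) & REQ_NOMERGE_FLAGS)
        return false;    /* one side forbids merging */
    return true;
}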

#define REQ_RAHEAD        (1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED        (1ULL << __REQ_THROTTLED)

#define REQ_SORTED        (1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER        (1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA            (1ULL << __REQ_FUA)
#define REQ_NOMERGE        (1ULL << __REQ_NOMERGE)
#define REQ_STARTED        (1ULL << __REQ_STARTED)
#define REQ_DONTPREP        (1ULL << __REQ_DONTPREP)
#define REQ_QUEUED        (1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV        (1ULL << __REQ_ELVPRIV)
#define REQ_FAILED        (1ULL << __REQ_FAILED)
#define REQ_QUIET        (1ULL << __REQ_QUIET)
#define REQ_PREEMPT        (1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED        (1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER        (1ULL << __REQ_COPY_USER)
#define REQ_FLUSH        (1ULL << __REQ_FLUSH)
#define REQ_FLUSH_SEQ        (1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT        (1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE        (1ULL << __REQ_MIXED_MERGE)
#define REQ_SECURE        (1ULL << __REQ_SECURE)
#define REQ_KERNEL        (1ULL << __REQ_KERNEL)
#define REQ_PM            (1ULL << __REQ_PM)
#define REQ_END            (1ULL << __REQ_END)
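
On the bio side these flags end up in bi_rw, most often via the rw
argument of submit_bio(), which ORs it into bi_rw before queueing. A
minimal sketch of issuing a synchronous forced-unit-access write in this
kernel series (assumes a fully built bio; the helper is hypothetical):

/* Sketch: submit a bio as a sync FUA write.  WRITE comes from
 * <linux/fs.h>; the bio is assumed fully set up (bi_bdev, bi_sector,
 * bi_io_vec, bi_end_io).  my_submit_fua() is a hypothetical helper. */
static void my_submit_fua(struct bio *bio)
{
    submit_bio(WRITE | REQ_SYNC | REQ_FUA, bio);
}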

#endif /* __LINUX_BLK_TYPES_H */