File: /Users/paulross/dev/linux/linux-3.13/arch/x86/include/asm/pgtable_types.h

Green shading in the line number column means the source is part of the translation unit, red means it is conditionally excluded. Highlighted line numbers link to the translation unit page. Highlighted macros link to the macro page.

       1: #ifndef _ASM_X86_PGTABLE_DEFS_H
       2: #define _ASM_X86_PGTABLE_DEFS_H
       3: 
       4: #include <linux/const.h>
       5: #include <asm/page_types.h>
       6: 
       7: #define FIRST_USER_ADDRESS    0
       8: 
       9: #define _PAGE_BIT_PRESENT    0    /* is present */
      10: #define _PAGE_BIT_RW        1    /* writeable */
      11: #define _PAGE_BIT_USER        2    /* userspace addressable */
      12: #define _PAGE_BIT_PWT        3    /* page write through */
      13: #define _PAGE_BIT_PCD        4    /* page cache disabled */
      14: #define _PAGE_BIT_ACCESSED    5    /* was accessed (raised by CPU) */
      15: #define _PAGE_BIT_DIRTY        6    /* was written to (raised by CPU) */
      16: #define _PAGE_BIT_PSE        7    /* 4 MB (or 2MB) page */
      17: #define _PAGE_BIT_PAT        7    /* on 4KB pages */
      18: #define _PAGE_BIT_GLOBAL    8    /* Global TLB entry PPro+ */
      19: #define _PAGE_BIT_UNUSED1    9    /* available for programmer */
      20: #define _PAGE_BIT_IOMAP        10    /* flag used to indicate IO mapping */
      21: #define _PAGE_BIT_HIDDEN    11    /* hidden by kmemcheck */
      22: #define _PAGE_BIT_PAT_LARGE    12    /* On 2MB or 1GB pages */
      23: #define _PAGE_BIT_SPECIAL    _PAGE_BIT_UNUSED1
      24: #define _PAGE_BIT_CPA_TEST    _PAGE_BIT_UNUSED1
      25: #define _PAGE_BIT_SPLITTING    _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
      26: #define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
      27: 
      28: /* If _PAGE_BIT_PRESENT is clear, we use these: */
      29: /* - if the user mapped it with PROT_NONE; pte_present gives true */
      30: #define _PAGE_BIT_PROTNONE    _PAGE_BIT_GLOBAL
      31: /* - set: nonlinear file mapping, saved PTE; unset:swap */
      32: #define _PAGE_BIT_FILE        _PAGE_BIT_DIRTY
      33: 
      34: #define _PAGE_PRESENT    (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
      35: #define _PAGE_RW    (_AT(pteval_t, 1) << _PAGE_BIT_RW)
      36: #define _PAGE_USER    (_AT(pteval_t, 1) << _PAGE_BIT_USER)
      37: #define _PAGE_PWT    (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
      38: #define _PAGE_PCD    (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
      39: #define _PAGE_ACCESSED    (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
      40: #define _PAGE_DIRTY    (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
      41: #define _PAGE_PSE    (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
      42: #define _PAGE_GLOBAL    (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
      43: #define _PAGE_UNUSED1    (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
      44: #define _PAGE_IOMAP    (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
      45: #define _PAGE_PAT    (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
      46: #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
      47: #define _PAGE_SPECIAL    (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
      48: #define _PAGE_CPA_TEST    (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
      49: #define _PAGE_SPLITTING    (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
      50: #define __HAVE_ARCH_PTE_SPECIAL
      51: 
      52: #ifdef CONFIG_KMEMCHECK
      53: #define _PAGE_HIDDEN    (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
      54: #else
      55: #define _PAGE_HIDDEN    (_AT(pteval_t, 0))
      56: #endif
      57: 
      58: /*
      59:  * The same hidden bit is used by kmemcheck, but since kmemcheck
      60:  * works on kernel pages while soft-dirty engine on user space,
      61:  * they do not conflict with each other.
      62:  */
      63: 
      64: #define _PAGE_BIT_SOFT_DIRTY    _PAGE_BIT_HIDDEN
      65: 
      66: #ifdef CONFIG_MEM_SOFT_DIRTY
      67: #define _PAGE_SOFT_DIRTY    (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
      68: #else
      69: #define _PAGE_SOFT_DIRTY    (_AT(pteval_t, 0))
      70: #endif
      71: 
      72: /*
      73:  * Tracking soft dirty bit when a page goes to a swap is tricky.
      74:  * We need a bit which can be stored in pte _and_ not conflict
      75:  * with swap entry format. On x86 bits 6 and 7 are *not* involved
      76:  * into swap entry computation, but bit 6 is used for nonlinear
      77:  * file mapping, so we borrow bit 7 for soft dirty tracking.
      78:  *
      79:  * Please note that this bit must be treated as swap dirty page
      80:  * mark if and only if the PTE has present bit clear!
      81:  */
      82: #ifdef CONFIG_MEM_SOFT_DIRTY
      83: #define _PAGE_SWP_SOFT_DIRTY    _PAGE_PSE
      84: #else
      85: #define _PAGE_SWP_SOFT_DIRTY    (_AT(pteval_t, 0))
      86: #endif
      87: 
      88: #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
      89: #define _PAGE_NX    (_AT(pteval_t, 1) << _PAGE_BIT_NX)
      90: #else
      91: #define _PAGE_NX    (_AT(pteval_t, 0))
      92: #endif
      93: 
      94: #define _PAGE_FILE    (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
      95: #define _PAGE_PROTNONE    (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
      96: 
      97: /*
      98:  * _PAGE_NUMA indicates that this page will trigger a numa hinting
      99:  * minor page fault to gather numa placement statistics (see
     100:  * pte_numa()). The bit picked (8) is within the range between
     101:  * _PAGE_FILE (6) and _PAGE_PROTNONE (8) bits. Therefore, it doesn't
     102:  * require changes to the swp entry format because that bit is always
     103:  * zero when the pte is not present.
     104:  *
     105:  * The bit picked must be always zero when the pmd is present and not
     106:  * present, so that we don't lose information when we set it while
     107:  * atomically clearing the present bit.
     108:  *
     109:  * Because we shared the same bit (8) with _PAGE_PROTNONE this can be
     110:  * interpreted as _PAGE_NUMA only in places that _PAGE_PROTNONE
     111:  * couldn't reach, like handle_mm_fault() (see access_error in
     112:  * arch/x86/mm/fault.c, the vma protection must not be PROT_NONE for
     113:  * handle_mm_fault() to be invoked).
     114:  */
     115: #define _PAGE_NUMA    _PAGE_PROTNONE
     116: 
     117: #define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |    \
     118:              _PAGE_ACCESSED | _PAGE_DIRTY)
     119: #define _KERNPG_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
     120:              _PAGE_DIRTY)
     121: 
     122: /* Set of bits not changed in pte_modify */
     123: #define _PAGE_CHG_MASK    (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |        \
     124:              _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
     125: #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
     126: 
     127: #define _PAGE_CACHE_MASK    (_PAGE_PCD | _PAGE_PWT)
     128: #define _PAGE_CACHE_WB        (0)
     129: #define _PAGE_CACHE_WC        (_PAGE_PWT)
     130: #define _PAGE_CACHE_UC_MINUS    (_PAGE_PCD)
     131: #define _PAGE_CACHE_UC        (_PAGE_PCD | _PAGE_PWT)
     132: 
     133: #define PAGE_NONE    __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
     134: #define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
     135:                  _PAGE_ACCESSED | _PAGE_NX)
     136: 
     137: #define PAGE_SHARED_EXEC    __pgprot(_PAGE_PRESENT | _PAGE_RW |    \
     138:                      _PAGE_USER | _PAGE_ACCESSED)
     139: #define PAGE_COPY_NOEXEC    __pgprot(_PAGE_PRESENT | _PAGE_USER |    \
     140:                      _PAGE_ACCESSED | _PAGE_NX)
     141: #define PAGE_COPY_EXEC        __pgprot(_PAGE_PRESENT | _PAGE_USER |    \
     142:                      _PAGE_ACCESSED)
     143: #define PAGE_COPY        PAGE_COPY_NOEXEC
     144: #define PAGE_READONLY        __pgprot(_PAGE_PRESENT | _PAGE_USER |    \
     145:                      _PAGE_ACCESSED | _PAGE_NX)
     146: #define PAGE_READONLY_EXEC    __pgprot(_PAGE_PRESENT | _PAGE_USER |    \
     147:                      _PAGE_ACCESSED)
     148: 
     149: #define __PAGE_KERNEL_EXEC                        \
     150:     (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
     151: #define __PAGE_KERNEL        (__PAGE_KERNEL_EXEC | _PAGE_NX)
     152: 
     153: #define __PAGE_KERNEL_RO        (__PAGE_KERNEL & ~_PAGE_RW)
     154: #define __PAGE_KERNEL_RX        (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
     155: #define __PAGE_KERNEL_EXEC_NOCACHE    (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
     156: #define __PAGE_KERNEL_WC        (__PAGE_KERNEL | _PAGE_CACHE_WC)
     157: #define __PAGE_KERNEL_NOCACHE        (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
     158: #define __PAGE_KERNEL_UC_MINUS        (__PAGE_KERNEL | _PAGE_PCD)
     159: #define __PAGE_KERNEL_VSYSCALL        (__PAGE_KERNEL_RX | _PAGE_USER)
     160: #define __PAGE_KERNEL_VVAR        (__PAGE_KERNEL_RO | _PAGE_USER)
     161: #define __PAGE_KERNEL_VVAR_NOCACHE    (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
     162: #define __PAGE_KERNEL_LARGE        (__PAGE_KERNEL | _PAGE_PSE)
     163: #define __PAGE_KERNEL_LARGE_NOCACHE    (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
     164: #define __PAGE_KERNEL_LARGE_EXEC    (__PAGE_KERNEL_EXEC | _PAGE_PSE)
     165: 
     166: #define __PAGE_KERNEL_IO        (__PAGE_KERNEL | _PAGE_IOMAP)
     167: #define __PAGE_KERNEL_IO_NOCACHE    (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
     168: #define __PAGE_KERNEL_IO_UC_MINUS    (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
     169: #define __PAGE_KERNEL_IO_WC        (__PAGE_KERNEL_WC | _PAGE_IOMAP)
     170: 
     171: #define PAGE_KERNEL            __pgprot(__PAGE_KERNEL)
     172: #define PAGE_KERNEL_RO            __pgprot(__PAGE_KERNEL_RO)
     173: #define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)
     174: #define PAGE_KERNEL_RX            __pgprot(__PAGE_KERNEL_RX)
     175: #define PAGE_KERNEL_WC            __pgprot(__PAGE_KERNEL_WC)
     176: #define PAGE_KERNEL_NOCACHE        __pgprot(__PAGE_KERNEL_NOCACHE)
     177: #define PAGE_KERNEL_UC_MINUS        __pgprot(__PAGE_KERNEL_UC_MINUS)
     178: #define PAGE_KERNEL_EXEC_NOCACHE    __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
     179: #define PAGE_KERNEL_LARGE        __pgprot(__PAGE_KERNEL_LARGE)
     180: #define PAGE_KERNEL_LARGE_NOCACHE    __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
     181: #define PAGE_KERNEL_LARGE_EXEC        __pgprot(__PAGE_KERNEL_LARGE_EXEC)
     182: #define PAGE_KERNEL_VSYSCALL        __pgprot(__PAGE_KERNEL_VSYSCALL)
     183: #define PAGE_KERNEL_VVAR        __pgprot(__PAGE_KERNEL_VVAR)
     184: #define PAGE_KERNEL_VVAR_NOCACHE    __pgprot(__PAGE_KERNEL_VVAR_NOCACHE)
     185: 
     186: #define PAGE_KERNEL_IO            __pgprot(__PAGE_KERNEL_IO)
     187: #define PAGE_KERNEL_IO_NOCACHE        __pgprot(__PAGE_KERNEL_IO_NOCACHE)
     188: #define PAGE_KERNEL_IO_UC_MINUS        __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
     189: #define PAGE_KERNEL_IO_WC        __pgprot(__PAGE_KERNEL_IO_WC)
     190: 
     191: /*         xwr */
     192: #define __P000    PAGE_NONE
     193: #define __P001    PAGE_READONLY
     194: #define __P010    PAGE_COPY
     195: #define __P011    PAGE_COPY
     196: #define __P100    PAGE_READONLY_EXEC
     197: #define __P101    PAGE_READONLY_EXEC
     198: #define __P110    PAGE_COPY_EXEC
     199: #define __P111    PAGE_COPY_EXEC
     200: 
     201: #define __S000    PAGE_NONE
     202: #define __S001    PAGE_READONLY
     203: #define __S010    PAGE_SHARED
     204: #define __S011    PAGE_SHARED
     205: #define __S100    PAGE_READONLY_EXEC
     206: #define __S101    PAGE_READONLY_EXEC
     207: #define __S110    PAGE_SHARED_EXEC
     208: #define __S111    PAGE_SHARED_EXEC
     209: 
     210: /*
     211:  * early identity mapping  pte attrib macros.
     212:  */
     213: #ifdef CONFIG_X86_64
     214: #define __PAGE_KERNEL_IDENT_LARGE_EXEC    __PAGE_KERNEL_LARGE_EXEC
     215: #else
     216: /*
     217:  * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
     218:  * bits are combined, this will alow user to access the high address mapped
     219:  * VDSO in the presence of CONFIG_COMPAT_VDSO
     220:  */
     221: #define PTE_IDENT_ATTR     0x003        /* PRESENT+RW */
     222: #define PDE_IDENT_ATTR     0x067        /* PRESENT+RW+USER+DIRTY+ACCESSED */
     223: #define PGD_IDENT_ATTR     0x001        /* PRESENT (no other attributes) */
     224: #endif
     225: 
     226: #ifdef CONFIG_X86_32
     227: # include <asm/pgtable_32_types.h>
     228: #else
     229: # include <asm/pgtable_64_types.h>
     230: #endif
     231: 
     232: #ifndef __ASSEMBLY__
     233: 
     234: #include <linux/types.h>
     235: 
     236: /* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
     237: #define PTE_PFN_MASK        ((pteval_t)PHYSICAL_PAGE_MASK)
     238: 
     239: /* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
     240: #define PTE_FLAGS_MASK        (~PTE_PFN_MASK)
     241: 
     242: typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
     243: 
     244: typedef struct { pgdval_t pgd; } pgd_t;
     245: 
     246: static inline pgd_t native_make_pgd(pgdval_t val)
     247: {
     248:     return (pgd_t) { val };
     249: }
     250: 
     251: static inline pgdval_t native_pgd_val(pgd_t pgd)
     252: {
     253:     return pgd.pgd;
     254: }
     255: 
     256: static inline pgdval_t pgd_flags(pgd_t pgd)
     257: {
     258:     return native_pgd_val(pgd) & PTE_FLAGS_MASK;
     259: }
     260: 
     261: #if PAGETABLE_LEVELS > 3
     262: typedef struct { pudval_t pud; } pud_t;
     263: 
     264: static inline pud_t native_make_pud(pmdval_t val)
     265: {
     266:     return (pud_t) { val };
     267: }
     268: 
     269: static inline pudval_t native_pud_val(pud_t pud)
     270: {
     271:     return pud.pud;
     272: }
     273: #else
     274: #include <asm-generic/pgtable-nopud.h>
     275: 
     276: static inline pudval_t native_pud_val(pud_t pud)
     277: {
     278:     return native_pgd_val(pud.pgd);
     279: }
     280: #endif
     281: 
     282: #if PAGETABLE_LEVELS > 2
     283: typedef struct { pmdval_t pmd; } pmd_t;
     284: 
     285: static inline pmd_t native_make_pmd(pmdval_t val)
     286: {
     287:     return (pmd_t) { val };
     288: }
     289: 
     290: static inline pmdval_t native_pmd_val(pmd_t pmd)
     291: {
     292:     return pmd.pmd;
     293: }
     294: #else
     295: #include <asm-generic/pgtable-nopmd.h>
     296: 
     297: static inline pmdval_t native_pmd_val(pmd_t pmd)
     298: {
     299:     return native_pgd_val(pmd.pud.pgd);
     300: }
     301: #endif
     302: 
     303: static inline pudval_t pud_flags(pud_t pud)
     304: {
     305:     return native_pud_val(pud) & PTE_FLAGS_MASK;
     306: }
     307: 
     308: static inline pmdval_t pmd_flags(pmd_t pmd)
     309: {
     310:     return native_pmd_val(pmd) & PTE_FLAGS_MASK;
     311: }
     312: 
     313: static inline pte_t native_make_pte(pteval_t val)
     314: {
     315:     return (pte_t) { .pte = val };
     316: }
     317: 
     318: static inline pteval_t native_pte_val(pte_t pte)
     319: {
     320:     return pte.pte;
     321: }
     322: 
     323: static inline pteval_t pte_flags(pte_t pte)
     324: {
     325:     return native_pte_val(pte) & PTE_FLAGS_MASK;
     326: }
     327: 
     328: #define pgprot_val(x)    ((x).pgprot)
     329: #define __pgprot(x)    ((pgprot_t) { (x) } )
     330: 
     331: 
     332: typedef struct page *pgtable_t;
     333: 
     334: extern pteval_t __supported_pte_mask;
     335: extern void set_nx(void);
     336: extern int nx_enabled;
     337: 
     338: #define pgprot_writecombine    pgprot_writecombine
     339: extern pgprot_t pgprot_writecombine(pgprot_t prot);
     340: 
     341: /* Indicate that x86 has its own track and untrack pfn vma functions */
     342: #define __HAVE_PFNMAP_TRACKING
     343: 
     344: #define __HAVE_PHYS_MEM_ACCESS_PROT
     345: struct file;
     346: pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
     347:                               unsigned long size, pgprot_t vma_prot);
     348: int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
     349:                               unsigned long size, pgprot_t *vma_prot);
     350: 
     351: /* Install a pte for a particular vaddr in kernel space. */
     352: void set_pte_vaddr(unsigned long vaddr, pte_t pte);
     353: 
     354: #ifdef CONFIG_X86_32
     355: extern void native_pagetable_init(void);
     356: #else
     357: #define native_pagetable_init        paging_init
     358: #endif
     359: 
     360: struct seq_file;
     361: extern void arch_report_meminfo(struct seq_file *m);
     362: 
     363: enum pg_level {
     364:     PG_LEVEL_NONE,
     365:     PG_LEVEL_4K,
     366:     PG_LEVEL_2M,
     367:     PG_LEVEL_1G,
     368:     PG_LEVEL_NUM
     369: };
     370: 
     371: #ifdef CONFIG_PROC_FS
     372: extern void update_page_count(int level, unsigned long pages);
     373: #else
     374: static inline void update_page_count(int level, unsigned long pages) { }
     375: #endif
     376: 
     377: /*
     378:  * Helper function that returns the kernel pagetable entry controlling
     379:  * the virtual address 'address'. NULL means no pagetable entry present.
     380:  * NOTE: the return type is pte_t but if the pmd is PSE then we return it
     381:  * as a pte too.
     382:  */
     383: extern pte_t *lookup_address(unsigned long address, unsigned int *level);
     384: extern phys_addr_t slow_virt_to_phys(void *__address);
     385: 
     386: #endif    /* !__ASSEMBLY__ */
     387: 
     388: #endif /* _ASM_X86_PGTABLE_DEFS_H */
     389: