linux/mm.h

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        struct mm_struct * vm_mm;       /* The address space we belong to. */
        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next;

        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, listed below. */

        rb_node_t vm_rb;

        /*
         * For areas with an address space and backing store,
         * one of the address_space->i_mmap{,shared} lists,
         * for shm areas, the list of attaches, otherwise unused.
         */
        struct vm_area_struct *vm_next_share;
        struct vm_area_struct **vm_pprev_share;

        /* Function pointers to deal with this struct. */
        struct vm_operations_struct * vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units, *not* PAGE_CACHE_SIZE */
        struct file * vm_file;          /* File we map to (can be NULL). */
        unsigned long vm_raend;         /* XXX: put full readahead info here. */
        void * vm_private_data;         /* was vm_pte (shared mem) */
};
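
Because the vm_next list is sorted by address, the question the fault path asks - "which VMA covers this address?" - can be answered by a plain list walk. The sketch below assumes mm->mmap heads the sorted list, as in the mm_struct of this era; the kernel's real find_vma() also consults the vm_rb tree and a one-entry per-mm cache, and simple_find_vma() is a made-up name for illustration:

/* Minimal sketch, list walk only; simple_find_vma() is hypothetical. */
static struct vm_area_struct *simple_find_vma(struct mm_struct *mm,
                                              unsigned long addr)
{
        struct vm_area_struct *vma;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                if (addr < vma->vm_end)   /* first area ending past addr */
                        return vma;       /* may still start above addr! */
        return NULL;
}

As with find_vma(), a non-NULL result only guarantees addr < vma->vm_end; the caller still checks vm_start <= addr to distinguish "inside this area" from "in the gap below it".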

/*
 * vm_flags..
 */
#define VM_READ         0x00000001      /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100      /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400      /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000      /* Memory mapped I/O or similar */

                                        /* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
#define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000      /* Don't unmap it from swap_out */

#define VM_STACK_FLAGS  0x00000177

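For readability, the literal 0x00000177 is exactly the bitwise OR of the stack's default bits (0x007 | 0x070 | 0x100); the expanded form below is ours, the kernel defines it as a bare literal:

/* Reference expansion of 0x00000177 - not the kernel's own spelling. */
#define VM_STACK_FLAGS  (VM_READ | VM_WRITE | VM_EXEC | \
                         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
                         VM_GROWSDOWN)
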
#define VM_READHINTMASK                 (VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)             (v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)            (!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)        ((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)            ((v)->vm_flags & VM_RAND_READ)

/* read ahead limits */
extern int vm_min_readahead;
extern int vm_max_readahead;

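The two hint bits are set by madvise(MADV_SEQUENTIAL) and madvise(MADV_RANDOM) and steer readahead between the limits declared above. A minimal sketch of such a policy decision - choose_ra_window() is a hypothetical helper, not the kernel's readahead code:

/* Sketch only: choose_ra_window() is hypothetical; the real policy
 * lives in the generic file read/fault paths. */
static int choose_ra_window(struct vm_area_struct *vma)
{
        if (VM_RandomReadHint(vma))
                return 0;                       /* no clustered readahead */
        if (VM_SequentialReadHint(vma))
                return vm_max_readahead;        /* read ahead aggressively */
        return vm_min_readahead;                /* default behaviour */
}
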
/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];

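Because VM_READ, VM_WRITE, VM_EXEC and VM_SHARED occupy the low four bits, they index protection_map directly. A minimal sketch of the idiom used when a fresh mapping's vm_page_prot is initialized (sketch_set_page_prot() is a made-up wrapper):

/* Sketch: the low four vm_flags bits form a 0..15 table index. */
static void sketch_set_page_prot(struct vm_area_struct *vma)
{
        vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
}
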
/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
};
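
A filesystem or driver that backs mappings fills in these hooks; only nopage is needed for demand paging. The following is a hedged sketch, not kernel code: my_nopage(), my_vm_ops and my_lookup_page() are hypothetical names standing in for whatever mechanism finds or allocates the backing page:

/* Hypothetical: returns the cached/allocated page for this file offset. */
static struct page *my_lookup_page(struct file *file, unsigned long pgoff);

static struct page *my_nopage(struct vm_area_struct *area,
                              unsigned long address, int unused)
{
        unsigned long pgoff = area->vm_pgoff +
                ((address - area->vm_start) >> PAGE_SHIFT);
        struct page *page = my_lookup_page(area->vm_file, pgoff);

        if (page)
                get_page(page);         /* nopage returns a held reference */
        return page;                    /* NULL makes the fault fail */
}

static struct vm_operations_struct my_vm_ops = {
        nopage: my_nopage,              /* open/close are optional */
};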

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page.
 *
 * Try to keep the most commonly accessed fields in single cache lines
 * here (16 bytes or greater).  This ordering should be particularly
 * beneficial on 32-bit processors.
 *
 * The first line is data used in page cache lookup, the second line
 * is used for linear searches (eg. clock algorithm scans).
 *
 * TODO: make this structure smaller, it could be as small as 32 bytes.
 */
typedef struct page {
        struct list_head list;          /* ->mapping has some page lists. */
        struct address_space *mapping;  /* The inode (or ...) we belong to. */
        unsigned long index;            /* Our offset within mapping. */
        struct page *next_hash;         /* Next page sharing our hash bucket in
                                           the pagecache hash table. */
        atomic_t count;                 /* Usage count, see below. */
        unsigned long flags;            /* atomic flags, some possibly
                                           updated asynchronously */
        struct list_head lru;           /* Pageout list, eg. active_list;
                                           protected by pagemap_lru_lock !! */
        struct page **pprev_hash;       /* Complement to *next_hash. */
        struct buffer_head * buffers;   /* Buffer maps us to a disk block. */

        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
} mem_map_t;
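
The next_hash/pprev_hash pair chains the page into the pagecache hash table, keyed by (mapping, index). A minimal sketch of a bucket walk follows; sketch_find_page() is a made-up helper that assumes the caller has already hashed (mapping, index) to the right bucket and holds the pagecache lock (the real routine of this era is __find_page_nolock()):

/* Sketch only: sketch_find_page() is hypothetical. */
static struct page *sketch_find_page(struct page *bucket,
                                     struct address_space *mapping,
                                     unsigned long index)
{
        struct page *page;

        for (page = bucket; page; page = page->next_hash)
                if (page->mapping == mapping && page->index == index)
                        return page;    /* hit: same file, same offset */
        return NULL;                    /* miss: page not cached */
}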