/*	$OpenBSD: pmap.h,v 1.67 2018/04/20 07:27:54 mlarkin Exp $	*/
/*	$NetBSD: pmap.h,v 1.1 2003/04/26 18:39:46 fvdl Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#ifndef _LOCORE
#ifdef _KERNEL
#include <machine/cpufunc.h>
#endif /* _KERNEL */
#include <sys/mutex.h>
#include <uvm/uvm_object.h>
#include <machine/pte.h>
#endif

/*
 * The x86_64 pmap module closely resembles the i386 one. It uses
 * the same recursive entry scheme. See the i386 pmap.h for a
 * description. The alternate area trick for accessing non-current
 * pmaps has been removed, though, because it performs badly on SMP
 * systems.
 * The most obvious difference from i386 is that 2 extra levels of page
 * table need to be dealt with. The level 1 page table pages are at:
 *
 * l1: 0x00007f8000000000 - 0x00007fffffffffff	(39 bits, needs PML4 entry)
 *
 * The other levels are kept as physical pages in 3 UVM objects and are
 * temporarily mapped for virtual access when needed.
 *
 * The other obvious difference from i386 is that it has a direct map of all
 * physical memory in the VA range:
 *
 *	0xffffff0000000000 - 0xffffff7fffffffff
 *
 * The direct map is used in some cases to access PTEs of non-current pmaps.
 *
 * Note that the address space is sign-extended, so the layout for 48 bits is:
 *
 * +---------------------------------+ 0xffffffffffffffff
 * |          Kernel Image           |
 * +---------------------------------+ 0xffffff8000000000
 * |           Direct Map            |
 * +---------------------------------+ 0xffffff0000000000
 * ~                                 ~
 * |                                 |
 * |          Kernel Space           |
 * |                                 |
 * |                                 |
 * +---------------------------------+ 0xffff800000000000 = 0x0000800000000000
 * |      L1 table (PTE pages)       |
 * +---------------------------------+ 0x00007f8000000000
 * ~                                 ~
 * |                                 |
 * |           User Space            |
 * |                                 |
 * |                                 |
 * +---------------------------------+ 0x0000000000000000
 *
 * In other words, there is a 'VA hole' at 0x0000800000000000 -
 * 0xffff800000000000 which will trap, just as on, for example,
 * sparcv9.
 *
 * The unused space can be used if needed, but it adds a little more
 * complexity to the calculations.
 */
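
/*
 * Illustrative note (not from the original header): with 48 implemented
 * VA bits, the CPU requires bits 63..48 to equal bit 47 ("canonical"
 * addresses), which yields exactly the two valid ranges drawn above:
 *
 *	bit 47 == 0:	0x0000000000000000 - 0x00007fffffffffff
 *	bit 47 == 1:	0xffff800000000000 - 0xffffffffffffffff
 *
 * Any access in between is non-canonical and faults, which is the
 * 'VA hole' referred to above.
 */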

/*
 * Mask to get rid of the sign-extended part of addresses.
 */
#define VA_SIGN_MASK		0xffff000000000000
#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)
/*
 * XXXfvdl this one's not right.
 */
#define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)
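
/*
 * Worked example (illustrative, assuming L4_SHIFT == 39 from
 * machine/pte.h): VA_SIGN_POS() strips the sign extension so a kernel
 * VA can be used to compute table indices. For the kernel-image base:
 *
 *	VA_SIGN_POS(0xffffff8000000000)	== 0x0000ff8000000000
 *	0x0000ff8000000000 >> 39	== 511 == L4_SLOT_KERNBASE
 *
 * VA_SIGN_NEG() goes the other way: it ORs the sign bits back in to
 * form a canonical kernel address.
 */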

#define L4_SLOT_PTE		255
#define L4_SLOT_KERN		256
#define L4_SLOT_KERNBASE	511
#define L4_SLOT_DIRECT		510

#define PDIR_SLOT_KERN		L4_SLOT_KERN
#define PDIR_SLOT_PTE		L4_SLOT_PTE
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE: the base VA of the linear PTE mappings
 * PDP_PDE: the VA of the PDE that points back to the PDP
 */

#define PTE_BASE	((pt_entry_t *)(L4_SLOT_PTE * NBPD_L4))
#define PMAP_DIRECT_BASE	(VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
#define PMAP_DIRECT_END		(VA_SIGN_NEG(((L4_SLOT_DIRECT + 1) * NBPD_L4)))

#define L1_BASE		PTE_BASE

#define L2_BASE	((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
#define L3_BASE	((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
#define L4_BASE	((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))

#define PDP_PDE		(L4_BASE + PDIR_SLOT_PTE)

#define PDP_BASE	L4_BASE
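
/*
 * Illustrative arithmetic (assuming the usual amd64 values from
 * machine/pte.h: NBPD_L4 = 2^39, NBPD_L3 = 2^30, NBPD_L2 = 2^21,
 * NBPD_L1 = 2^12). The recursive slot pins the whole tree at:
 *
 *	PTE_BASE = 255 * 2^39			= 0x00007f8000000000
 *	L2_BASE  = PTE_BASE + 255 * 2^30	= 0x00007fbfc0000000
 *	L3_BASE  = L2_BASE  + 255 * 2^21	= 0x00007fbfdfe00000
 *	L4_BASE  = L3_BASE  + 255 * 2^12	= 0x00007fbfdfeff000
 *
 * i.e. each extra recursion through slot 255 narrows the window by a
 * factor of 512, ending at the single page holding the PML4 itself.
 */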

#define NKL4_MAX_ENTRIES	(unsigned long)1
#define NKL3_MAX_ENTRIES	(unsigned long)(NKL4_MAX_ENTRIES * 512)
#define NKL2_MAX_ENTRIES	(unsigned long)(NKL3_MAX_ENTRIES * 512)
#define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * 512)

#define NKL4_KIMG_ENTRIES	1
#define NKL3_KIMG_ENTRIES	1
#define NKL2_KIMG_ENTRIES	16

#define NDML4_ENTRIES		1
#define NDML3_ENTRIES		1
#define NDML2_ENTRIES		4	/* 4GB */
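
/*
 * Sanity check on the "4GB" comment above (illustrative, assuming the
 * direct map is built from 2MB large pages, 512 entries per L2 page):
 *
 *	NDML2_ENTRIES * 512 * 2MB = 4 * 512 * 2MB = 4GB
 */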

/*
 * Since kva space is below the kernel in its entirety, we start off
 * with zero entries on each level.
 */
#define NKL4_START_ENTRIES	0
#define NKL3_START_ENTRIES	0
#define NKL2_START_ENTRIES	0
#define NKL1_START_ENTRIES	0	/* XXX */

#define NTOPLEVEL_PDES		(PAGE_SIZE / (sizeof (pd_entry_t)))

#define NPDPG			(PAGE_SIZE / sizeof (pd_entry_t))

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])
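
/*
 * Worked example (illustrative): for the direct map base VA,
 *
 *	pl4_i(0xffffff0000000000)
 *	    == (0x0000ff0000000000 & L4_FRAME) >> 39
 *	    == 510 == L4_SLOT_DIRECT
 *
 * pl*_i indexes the flat per-level arrays rooted at PTE_BASE/L2_BASE/
 * L3_BASE/L4_BASE, while pl*_pi gives the index within one table page.
 */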

#define PTP_MASK_INITIALIZER	{ L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
#define PTP_SHIFT_INITIALIZER	{ L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
#define NKPTP_INITIALIZER	{ NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
				  NKL3_START_ENTRIES, NKL4_START_ENTRIES }
#define NKPTPMAX_INITIALIZER	{ NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
				  NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
#define NBPD_INITIALIZER	{ NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
#define PDES_INITIALIZER	{ L2_BASE, L3_BASE, L4_BASE }

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)

#define PTP_LEVELS	4
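
/*
 * Worked example (illustrative): the L1 PTP that maps VA 0x400000
 * holds PTEs 0x400-0x5ff of the linear PTE array, so its byte offset
 * within the PTE space is
 *
 *	ptp_va2o(0x400000, 1) == pl2_i(0x400000) * PAGE_SIZE
 *			      == 2 * 4096 == 0x2000
 */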

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
/* PG_AVAIL3 not used */

/*
 * Number of PTEs per cache line. 8 byte pte, 64-byte cache line
 * Used to avoid false sharing of cache lines.
 */
#define NPTECL		8


#if defined(_KERNEL) && !defined(_LOCORE)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the reference count,
 * page list, and number of PTPs within the pmap.
 */

#define PMAP_TYPE_NORMAL	1
#define PMAP_TYPE_EPT		2
#define PMAP_TYPE_RVI		3
#define pmap_nested(pm)		((pm)->pm_type != PMAP_TYPE_NORMAL)

struct pmap {
	struct mutex pm_mtx;
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	/*
	 * pm_pdir		: VA of page table to be used when executing
	 *			  in privileged mode
	 * pm_pdirpa		: PA of page table to be used when executing
	 *			  in privileged mode
	 * pm_pdir_intel	: VA of special page table to be used when
	 *			  executing on an Intel CPU in usermode
	 *			  (no kernel mappings)
	 * pm_pdirpa_intel	: PA of special page table to be used when
	 *			  executing on an Intel CPU in usermode
	 *			  (no kernel mappings)
	 */
	pd_entry_t *pm_pdir, *pm_pdir_intel;
	paddr_t pm_pdirpa, pm_pdirpa_intel;

	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */

	u_int64_t pm_cpus;	/* mask of CPUs using pmap */
	int pm_type;		/* Type of pmap this is (PMAP_TYPE_x) */
};

/*
 * MD flags that we use for pmap_enter (in the pa):
 */
#define PMAP_PA_MASK	~((paddr_t)PAGE_MASK)	/* to remove the flags */
#define PMAP_NOCACHE	0x1	/* set the non-cacheable bit. */
#define PMAP_WC		0x2	/* set page write combining. */
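
/*
 * Illustrative use (a sketch, not from this header; pmap_enter() and
 * PMAP_WIRED come from the MI pmap interface): the flag bits travel in
 * the low, page-offset bits of the physical address argument, e.g.
 *
 *	pmap_enter(pmap_kernel(), va, pa | PMAP_NOCACHE,
 *	    PROT_READ | PROT_WRITE, PMAP_WIRED);
 *
 * and are masked back out with PMAP_PA_MASK before the PTE is built.
 */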

/*
 * We keep mod/ref flags in struct vm_page->pg_flags.
 */
#define PG_PMAP_MOD	PG_PMAP0
#define PG_PMAP_REF	PG_PMAP1
#define PG_PMAP_WC	PG_PMAP2

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.
 */
struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

/*
 * global kernel variables
 */

/* PTDpaddr: is the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */

extern long nkptp[];

extern const paddr_t ptp_masks[];
extern const int ptp_shifts[];
extern const long nbpd[], nkptpmax[];

/*
 * macros
 */

#define pmap_kernel()			(&kernel_pmap_store)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_update(pmap)		/* nothing (yet) */

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#define pmap_proc_iflush(p,va,len)	/* nothing */
#define pmap_unuse_final(p)		/* nothing */
#define pmap_remove_holes(vm)		do { /* nothing */ } while (0)


/*
 * prototypes
 */

paddr_t pmap_bootstrap(paddr_t, paddr_t);
boolean_t pmap_clear_attrs(struct vm_page *, unsigned long);
static void pmap_page_protect(struct vm_page *, vm_prot_t);
void pmap_page_remove(struct vm_page *);
static void pmap_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_remove(struct pmap *, vaddr_t, vaddr_t);
boolean_t pmap_test_attrs(struct vm_page *, unsigned);
static void pmap_update_pg(vaddr_t);
void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_fix_ept(struct pmap *, vaddr_t);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

paddr_t pmap_prealloc_lowmem_ptps(paddr_t);

void pagezero(vaddr_t);

int pmap_convert(struct pmap *, int);
void pmap_enter_special(vaddr_t, paddr_t, vm_prot_t);

/*
 * functions for flushing the cache for vaddrs and pages.
 * these functions are not part of the MI pmap interface and thus
 * should not be used as such.
 */
void pmap_flush_cache(vaddr_t, vsize_t);
#define pmap_flush_page(paddr) do {					\
	KDASSERT(PHYS_TO_VM_PAGE(paddr) != NULL);			\
	pmap_flush_cache(PMAP_DIRECT_MAP(paddr), PAGE_SIZE);		\
} while (/* CONSTCOND */ 0)

#define PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * inline functions
 */

static inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

inline static void
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 * of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *    unprotecting a page is done on-demand at fault time.
 */

inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & PROT_WRITE) == 0) {
		if (prot & (PROT_READ | PROT_EXEC)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *    unprotecting a page is done on-demand at fault time.
 */

inline static void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & PROT_WRITE) == 0) {
		if (prot & (PROT_READ | PROT_EXEC)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

static inline pt_entry_t *
vtopte(vaddr_t va)
{
	return (PTE_BASE + pl1_i(va));
}

static inline pt_entry_t *
kvtopte(vaddr_t va)
{
#ifdef LARGEPAGES
	{
		pd_entry_t *pde;

		/* read the L2 (PDE) slot for this VA from the flat array */
		pde = L2_BASE + pl2_i(va);
		if (*pde & PG_PS)
			return ((pt_entry_t *)pde);
	}
#endif

	return (PTE_BASE + pl1_i(va));
}
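
/*
 * Illustrative use (a sketch, not from the original header): to read
 * the PTE that currently maps a kernel VA through the recursive slot:
 *
 *	pt_entry_t pte = *kvtopte(va);
 *	if (pmap_valid_entry(pte))
 *		pa = (pte & PG_FRAME) | (va & PAGE_MASK);
 *
 * PG_FRAME masks the physical frame bits out of the PTE (defined in
 * machine/pte.h).
 */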

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)
#define pmap_map_direct(pg)	PMAP_DIRECT_MAP(VM_PAGE_TO_PHYS(pg))
#define pmap_unmap_direct(va)	PHYS_TO_VM_PAGE(PMAP_DIRECT_UNMAP(va))
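
/*
 * Worked example (illustrative): PMAP_DIRECT_BASE is
 * VA_SIGN_NEG(510 * 2^39) == 0xffffff0000000000, so
 *
 *	PMAP_DIRECT_MAP(0x1000)			== 0xffffff0000001000
 *	PMAP_DIRECT_UNMAP(0xffffff0000001000)	== 0x1000
 *
 * i.e. the direct map is a constant offset between PA and VA.
 */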

#define __HAVE_PMAP_DIRECT

#endif /* _KERNEL && !_LOCORE */

#ifndef _LOCORE
struct pv_entry;
struct vm_page_md {
	struct mutex pv_mtx;
	struct pv_entry *pv_list;
};

#define VM_MDPAGE_INIT(pg) do {				\
	mtx_init(&(pg)->mdpage.pv_mtx, IPL_VM);		\
	(pg)->mdpage.pv_list = NULL;			\
} while (0)
#endif /* !_LOCORE */

#endif /* _MACHINE_PMAP_H_ */