/*
 * Copyright (c) 1987, 2022, Oracle and/or its affiliates.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_VM_HAT_H
#define	_VM_HAT_H

#include <sys/types.h>
#include <sys/t_lock.h>
#include <vm/faultcode.h>
#include <sys/kstat.h>
#include <sys/siginfo.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the machine-independent interfaces to
 * the hardware address translation management routines. Other
 * machine-specific interfaces and structures are defined
 * in <vm/hat_xxx.h>. The hat layer manages the address
 * translation hardware as a cache driven by calls from the
 * higher levels of the VM system.
 */

struct hat;
struct kpme;
struct memseg;

#include <vm/page.h>

/*
 * A callback used with hat_unload_callback().
 * hcb_start_addr and hcb_end_addr are set to a range of unloaded addresses,
 * and hcb_function is invoked with a pointer to this data structure.
 * Any pages that need to be freed are collected in hcb_pages and
 * freed at the end of the hat function that invokes the callback,
 * so that no hat layer locks are held while freeing those pages.
 */
typedef struct hat_callback {
	caddr_t	hcb_start_addr;
	caddr_t	hcb_end_addr;
	void	(*hcb_function)(struct hat_callback *, int);
	void	*hcb_data;
	void	*hcb_pages;
} hat_callback_t;

typedef void *hat_region_cookie_t;

typedef struct hat_range {
	list_node_t	hr_link;
	caddr_t		hr_addr;
	size_t		hr_size;
} hat_range_t;

#ifdef	_KERNEL

/*
 * One time hat initialization
 */
void	hat_init(void);

/*
 * One time hat stats initialization
 */
void	hat_stat_init(void);

/*
 * Operations on an address space:
 *
 * struct hat *hat_alloc(as)
 *	allocates a hat structure for as.
 *
 * void hat_free_start(hat)
 *	informs hat layer that the process has finished executing but as
 *	has not been cleaned up yet.
 *
 * void hat_free_end(hat)
 *	informs hat layer that as is being destroyed. hat layer cannot use
 *	the as pointer after this call.
 *
 * void hat_swapin(hat)
 *	allocate any hat resources required for process being swapped in.
 *
 * void hat_swapout(hat)
 *	deallocate hat resources for process being swapped out.
 *
 * size_t hat_get_mapped_size(hat, rsspriv)
 *	returns the number of bytes that have valid mappings in hat if the
 *	2nd param is false. If the 2nd param is true, returns just the
 *	private mapping count.
 *
 * int hat_stats_enable(hat)
 * void hat_stats_disable(hat)
 *	enables/disables collection of stats for hat.
 *
 * int hat_dup(parenthat, childhat, addr, len, flags)
 *	Duplicate address translations of the parent to the child. Supports
 *	the entire address range or a sub-range depending on flags.
 *	Zero is returned on success, non-zero on error.
 *
 * void hat_thread_exit(thread)
 *	Notifies the HAT that a thread is exiting, called after it has been
 *	reassigned to the kernel AS.
 *
 * void hat_as_exit(hat)
 *	Notifies the hat layer that the current process is exiting.
 */
struct hat *hat_alloc(struct as *);
struct hat *hat_alloc_model(struct as *, model_t);
void	hat_free_start(struct hat *);
void	hat_free_end(struct hat *);
int	hat_dup(struct hat *, struct hat *, caddr_t, size_t, uint_t);
void	hat_swapin(struct hat *);
void	hat_swapout(struct hat *);
size_t	hat_get_mapped_size(struct hat *, boolean_t);
int	hat_stats_enable(struct hat *);
void	hat_stats_disable(struct hat *);
void	hat_thread_exit(kthread_t *);
void	hat_as_exit(struct hat *);
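/*
 * Example: a minimal sketch of duplicating all of a parent's translations
 * into the hat of a newly created child address space, as a fork-style
 * caller might. parent_hat and child_as are hypothetical variables, and
 * passing a NULL address with zero length assumes HAT_DUP_ALL (defined
 * below) makes the range arguments irrelevant:
 *
 *	struct hat *child_hat = hat_alloc(child_as);
 *	if (hat_dup(parent_hat, child_hat, NULL, 0, HAT_DUP_ALL) != 0)
 *		return (ENOMEM);	(hypothetical error path)
 */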
/*
 * Operations on a named address within a segment:
 *
 * void hat_memload(hat, addr, pp, attr, flags)
 *	load/lock the given page struct
 *
 * void hat_memload_array(hat, addr, len, ppa, attr, flags)
 *	load/lock the given array of page structs
 *
 * void hat_memload_array_mc(hat, addr, len, ppa, attr, flags, mc)
 *	same as above, but with an extra Memory Context argument
 *
 * void hat_devload(hat, addr, len, pf, attr, flags)
 *	load/lock the given page frame number
 *
 * void hat_devload_mc(hat, addr, len, pf, attr, flags, mc)
 *	same as above, but with an extra Memory Context argument
 *
 * void hat_unlock(hat, addr, len)
 *	unlock a given range of addresses
 *
 * void hat_unload(hat, addr, len, flags)
 * void hat_unload_callback(hat, addr, len, flags, callback)
 *	unload a given range of addresses (has optional callback)
 *
 * void hat_unload_list(hat, ranges, nranges, flags)
 *	unload a list of ranges
 *
 * void hat_sync(hat, addr, len, flags)
 *	synchronize mapping with software data structures
 *
 * void hat_map(hat, addr, len, flags)
 *
 * void hat_setattr(hat, addr, len, attr)
 * void hat_clrattr(hat, addr, len, attr)
 * void hat_chgattr(hat, addr, len, attr)
 *	modify attributes for a range of addresses. skips any invalid
 *	mappings.
 *
 * uint_t hat_getattr(hat, addr, *attr)
 *	returns attr for <hat, addr> in *attr. returns 0 if there was a
 *	mapping and *attr is valid, nonzero if there was no mapping and
 *	*attr is not valid.
 *
 * size_t hat_getpagesize(hat, addr)
 *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
 *	no mapping. This is an advisory call.
 *
 * size_t hat_getpagesize_share(hat, addr, shared)
 *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
 *	no mapping. This is an advisory call. Only searches for shared
 *	mappings if the shared parameter is set.
 *
 * pfn_t hat_getpfnum(hat, addr)
 *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
 *
 * pfn_t hat_getkpfnum(addr)
 *	returns pfn for non-memory mapped addr in kernel address space
 *	or PFN_INVALID if mapping is invalid or is kernel memory.
 *
 * int hat_probe(hat, addr)
 *	return 0 if no valid mapping is present. Faster version
 *	of hat_getattr on certain architectures.
 *
 * int hat_share(outer_hat, outer_addr, inner_hat, inner_addr, len, szc)
 *	Set up the range [outer_addr, outer_addr + len) in outer_hat to
 *	reflect the range [inner_addr, inner_addr + len) in inner_hat,
 *	with desired pagesize szc. Returns 0 on success.
 *
 * void hat_share_range(hat, addr, len, flags)
 *	Ensure that the VA range [addr, addr + len) in the outer address
 *	space managed by "hat" is an accurate reflection of the
 *	corresponding part of the inner HAT. The outer VA range must be a
 *	subset of an active hat_share() outer range. Flags are ORed from:
 *	HSR_CONSIST: inner mappings are consistent
 *
 * size_t hat_share_pref_alignment(len)
 *	Return the preferred hat_share() alignment for an outer address
 *	mapping of a region of length "len".
 *
 * caddr_t hat_share_pref_inneraddr(addr, len)
 *	Return a "good" inner base address given an outer address of addr
 *	and a length of len.
 *
 * void hat_unshare(hat, addr, len, szc)
 *	Removes an active outer hat_share at [addr, addr + len). Must match
 *	the range of the prior hat_share() precisely.
 *
 * void hat_chgprot(hat, addr, len, vprot)
 *	This is a deprecated call. New segment drivers should store
 *	all attributes and use the hat_*attr calls.
 *	Change the protections in the given virtual address range
 *	to the specified virtual protection. If vprot is ~PROT_WRITE,
 *	then remove write permission, leaving the other permissions
 *	unchanged. If vprot is ~PROT_USER, remove user permissions.
 *
 * void hat_flush_range(hat, addr, size)
 *	Invalidate a virtual address translation for the local CPU.
 */
void	hat_memload(struct hat *, caddr_t, struct page *, uint_t, uint_t);
void	hat_memload_array(struct hat *, caddr_t, size_t, struct page **,
	uint_t, uint_t);
void	hat_memload_array_mc(struct hat *, caddr_t, size_t, struct page **,
	uint_t, uint_t, uint32_t);
void	hat_memload_region(struct hat *, caddr_t, struct page *, uint_t,
	uint_t, hat_region_cookie_t);
void	hat_memload_array_region(struct hat *, caddr_t, size_t,
	struct page **, uint_t, uint_t, hat_region_cookie_t);
void	hat_devload(struct hat *, caddr_t, size_t, pfn_t, uint_t, int);
void	hat_devload_mc(struct hat *, caddr_t, size_t, pfn_t, uint_t, int,
	uint32_t);
void	hat_unlock(struct hat *, caddr_t, size_t);
void	hat_unlock_region(struct hat *, caddr_t, size_t,
	hat_region_cookie_t);
void	hat_unload(struct hat *, caddr_t, size_t, uint_t);
void	hat_unload_callback(struct hat *, caddr_t, size_t, uint_t,
	hat_callback_t *);
void	hat_unload_list(struct hat *, list_t *, pgcnt_t, uint_t);
void	hat_flush_range(struct hat *, caddr_t, size_t);
void	hat_sync(struct hat *, caddr_t, size_t, uint_t);
void	hat_map(struct hat *, caddr_t, size_t, uint_t);
void	hat_setattr(struct hat *, caddr_t, size_t, uint_t);
void	hat_clrattr(struct hat *, caddr_t, size_t, uint_t);
void	hat_chgattr(struct hat *, caddr_t, size_t, uint_t);
uint_t	hat_getattr(struct hat *, caddr_t, uint_t *);
ssize_t	hat_getpagesize(struct hat *, caddr_t);
ssize_t	hat_getpagesize_share(struct hat *, caddr_t, int);
pfn_t	hat_getpfnum(struct hat *, caddr_t);
paddr_t	hat_getpaddr_size(struct hat *, caddr_t, size_t *);
int	hat_probe(struct hat *, caddr_t);
int	hat_share(struct hat *, caddr_t, struct hat *, caddr_t, size_t,
	uint_t);
void	hat_share_range(struct hat *, caddr_t, size_t, uint_t);
#define	HSR_CONSIST	(0x1)
size_t	hat_share_pref_alignment(size_t);
caddr_t	hat_share_pref_inneraddr(caddr_t, size_t);
void	hat_unshare(struct hat *, caddr_t, size_t, uint_t);
void	hat_chgprot(struct hat *, caddr_t, size_t, uint_t);
void	hat_reserve(struct as *, caddr_t, size_t);
pfn_t	va_to_pfn(void *);
uint64_t va_to_pa(void *);

#if defined(__amd64)
uintptr_t hat_getvaddr(struct hat *, pfn_t);
#endif

/*
 * hat_getkpfnum() is never supported on amd64 and will be
 * removed from other platforms in a future release.
 */
#if !defined(__amd64)
pfn_t	hat_getkpfnum(caddr_t);
#endif
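/*
 * Example: a minimal sketch of how a segment driver might load a locked,
 * writable translation for one page and later tear it down. hat, vaddr,
 * and pp are hypothetical variables; PROT_READ/PROT_WRITE come from
 * <sys/mman.h> and MMU_PAGESIZE from the machine headers:
 *
 *	hat_memload(hat, vaddr, pp, PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
 *	pfn = hat_getpfnum(hat, vaddr);		(now maps pp's frame)
 *	...
 *	hat_unload(hat, vaddr, MMU_PAGESIZE, HAT_UNLOAD_UNLOCK);
 */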
/*
 * Kernel Physical Mapping (segkpm) hat interface routines.
 */
caddr_t	hat_kpm_mapin(struct page *, struct kpme *);
void	hat_kpm_mapout(struct page *, struct kpme *, caddr_t);
caddr_t	hat_kpm_mapin_pfn(pfn_t);
void	hat_kpm_mapout_pfn(pfn_t);
caddr_t	hat_kpm_paddr_mapin(paddr_t, size_t);
void	hat_kpm_paddr_mapout(void *, size_t);
caddr_t	hat_kpm_page2va(struct page *, int);
struct page *hat_kpm_vaddr2page(caddr_t);
caddr_t	hat_kpm_pfn2va(pfn_t pfn);
pfn_t	hat_kpm_va2pfn(void *);
paddr_t	hat_kpm_va2paddr(void *);
int	hat_kpm_fault(struct hat *, caddr_t);
void	hat_kpm_addmem(pfn_t, pgcnt_t);
void	hat_kpm_delmem(pfn_t, pgcnt_t);
void	hat_kpm_walk(void (*)(void *, void *, size_t), void *);
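/*
 * Example: a minimal sketch of a kernel-side copy into a page through the
 * segkpm interface, avoiding a transient mapping. pp and src are
 * hypothetical, and passing a NULL kpme assumes the caller does not track
 * the mapping in a kpme structure:
 *
 *	caddr_t va = hat_kpm_mapin(pp, NULL);
 *	bcopy(src, va, MMU_PAGESIZE);
 *	hat_kpm_mapout(pp, NULL, va);
 */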
/*
 * Operations on all translations for a given page(s)
 *
 * void hat_page_setattr(pp, flag)
 * void hat_page_clrattr(pp, flag)
 *	used to set/clr ref/mod bits.
 *
 * uint_t hat_page_getattr(pp, flag)
 *	If flag is specified, returns 0 if the attribute is disabled
 *	and non-zero if enabled. If flag specifies multiple attributes,
 *	returns 0 if ALL attributes are disabled. This is an advisory
 *	call.
 *
 * int hat_pageunload(pp, forceflag)
 *	unload all translations attached to pp.
 *
 * uint_t hat_pagesync(pp, flags)
 *	get hw stats from hardware into page struct and reset hw stats.
 *	returns attributes of page.
 *
 * ulong_t hat_page_getshare(pp)
 *	returns the approximate number of mappings to this pp. A return of
 *	0 implies there are no mappings to the page.
 *
 * faultcode_t hat_softlock(hat, addr, lenp, ppp, flags);
 *	called to softlock pages for zero copy tcp
 *
 * void hat_page_demote(pp);
 *	unload all large mappings to pp and decrease p_szc of all
 *	constituent pages according to the remaining mappings.
 */
void	hat_page_setattr(struct page *, uint_t);
void	hat_page_clrattr(struct page *, uint_t);
uint_t	hat_page_getattr(struct page *, uint_t);
int	hat_pageunload(struct page *, uint_t);
uint_t	hat_pagesync(struct page *, uint_t);
ulong_t	hat_page_getshare(struct page *);
int	hat_page_checkshare(struct page *, ulong_t);
faultcode_t hat_softlock(struct hat *, caddr_t, size_t *, struct page **,
	uint_t);
void	hat_page_demote(struct page *);
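/*
 * Example: a minimal pageout-style sketch deciding whether a page is
 * reusable. pp is hypothetical; the HAT_SYNC_*, P_REF and
 * HAT_ADV_PGUNLOAD values are defined later in this header:
 *
 *	uint_t attrs = hat_pagesync(pp, HAT_SYNC_DONTZERO |
 *	    HAT_SYNC_STOPON_REF);
 *	if ((attrs & P_REF) == 0)
 *		(void) hat_pageunload(pp, HAT_ADV_PGUNLOAD);
 */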
/*
 * Memory Context support functions:
 *
 * hat_mc_aligned()
 *	Check if the address is aligned properly for Memory Contexts.
 */
int	hat_mc_aligned(caddr_t, size_t, size_t);

/*
 * Routine to expose supported HAT features to PIM.
 */
enum hat_features {
	HAT_SHARED_PT_UNUSED,		/* obsolete */
	HAT_DYNAMIC_ISM_UNMAP_UNUSED,	/* obsolete */
	HAT_VMODSORT,			/* support for VMODSORT flag of vnode */
	HAT_SHARED_REGIONS,		/* shared regions support */
	HAT_ADI,			/* application data integrity */
	HAT_MC				/* memory contexts */
};

int	hat_supported(enum hat_features, void *);

/*
 * Services provided to the hat:
 *
 * void as_signal_proc(as, siginfo)
 *	deliver signal to all processes that have this as.
 *
 * int hat_setstat(as, addr, len, rmbits)
 *	informs hatstat layer that ref/mod bits need to be updated for
 *	address range. Returns 0 on success, 1 for failure.
 */
void	as_signal_proc(struct as *, k_siginfo_t *siginfo);
void	hat_setstat(struct as *, caddr_t, size_t, uint_t);

/*
 * Flags to pass to hat routines.
 *
 * Certain flags only apply to some interfaces:
 *
 * HAT_LOAD		Default flags to load a translation to the page.
 * HAT_LOAD_LOCK	Lock down mapping resources; hat_map(),
 *			hat_memload(), and hat_devload().
 * HAT_LOAD_ADV		Advisory load - load the translation if and only if
 *			sufficient MMU resources exist (i.e., do not steal).
 * HAT_LOAD_SHARE	A flag to hat_memload() to indicate that the h/w page
 *			tables that map some user pages (not kas) are shared
 *			by more than one process (e.g. ISM).
 * HAT_LOAD_CONTIG	Pages are contiguous.
 * HAT_LOAD_NOCONSIST	Do not add mapping to mapping list.
 * HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
 * HAT_RELOAD_SHARE	Reload a shared page table entry. Some platforms
 *			may require different actions than on the first
 *			load of a shared mapping.
 * HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
 *			point, it's setting up a mapping to allocate internal
 *			hat layer data structures. This flag forces the hat
 *			layer to tap its reserves in order to prevent
 *			infinite recursion.
 * HAT_LOAD_TEXT	A flag to hat_memload() to indicate loading text
 *			pages.
 * HAT_LOAD_PRIV	For rss stats to track MAP_PRIVATE mappings.
 * HAT_ISKZ		A flag to hat_devload() to indicate loading mappings
 *			for the kernel zone kzhost process.
 */

/*
 * Flags for hat_memload/hat_devload
 */
#define	HAT_FLAGS_RESV		0xFF000000	/* resv for hat impl */
#define	HAT_LOAD		0x00
#define	HAT_LOAD_LOCK		0x01
#define	HAT_LOAD_ADV		0x04
#define	HAT_LOAD_CONTIG		0x10
#define	HAT_LOAD_NOCONSIST	0x20
#define	HAT_LOAD_SHARE		0x40
#define	HAT_LOAD_REMAP		0x80
#define	HAT_RELOAD_SHARE	0x100
#define	HAT_NO_KALLOC		0x200
#define	HAT_LOAD_TEXT		0x400
#define	HAT_LOAD_PRIV		0x800
#define	HAT_ISKZ		0x1000

/*
 * Flags for initializing disable_*large_pages.
 *
 * HAT_AUTO_TEXT	Get MMU specific disable_auto_text_large_pages
 * HAT_AUTO_DATA	Get MMU specific disable_auto_data_large_pages
 */
#define	HAT_AUTO_TEXT		0x800
#define	HAT_AUTO_DATA		0x1000

/*
 * Attributes for hat_memload/hat_devload/hat_*attr
 * are a superset of prot flags defined in mman.h.
 */
#define	HAT_PLAT_ATTR_MASK	0xF00000
#define	HAT_PROT_MASK		0x0F

#define	HAT_NOFAULT		0x10
#define	HAT_NOSYNC		0x20
#define	HAT_MCD			0x40

/*
 * Advisory ordering attributes. Apply only to device mappings.
 *
 * HAT_STRICTORDER:	the CPU must issue the references in order, as the
 *	programmer specified. This is the default.
 * HAT_UNORDERED_OK:	the CPU may reorder the references (this is all
 *	kinds of reordering; store or load with store or load).
 * HAT_MERGING_OK:	merging and batching: the CPU may merge individual
 *	stores to consecutive locations (for example, turn two consecutive
 *	byte stores into one halfword store), and it may batch individual
 *	loads (for example, turn two consecutive byte loads into one
 *	halfword load). This also implies re-ordering.
 * HAT_LOADCACHING_OK:	the CPU may cache the data it fetches and reuse it
 *	until another store occurs. The default is to fetch new data
 *	on every load. This also implies merging.
 * HAT_STORECACHING_OK:	the CPU may keep the data in the cache and push it
 *	to the device (perhaps with other data) at a later time. The default
 *	is to push the data right away. This also implies load caching.
 */
#define	HAT_STRICTORDER		0x0000
#define	HAT_UNORDERED_OK	0x0100
#define	HAT_MERGING_OK		0x0200
#define	HAT_LOADCACHING_OK	0x0300
#define	HAT_STORECACHING_OK	0x0400
#define	HAT_ORDER_MASK		0x0700

/* endian attributes */
#define	HAT_NEVERSWAP		0x0000
#define	HAT_STRUCTURE_BE	0x1000
#define	HAT_STRUCTURE_LE	0x2000
#define	HAT_ENDIAN_MASK		0x3000

/* flags for hat_softlock */
#define	HAT_COW			0x0001

/*
 * Flags for hat_unload
 */
#define	HAT_UNLOAD		0x00
#define	HAT_UNLOAD_NO_TLB_FLUSH	0x01
#define	HAT_UNLOAD_NOSYNC	0x02
#define	HAT_UNLOAD_UNLOCK	0x04
#define	HAT_UNLOAD_OTHER	0x08
#define	HAT_UNLOAD_UNMAP	0x10
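/*
 * Example: a minimal sketch of loading a locked device mapping whose
 * attribute word combines an mman.h protection with the ordering and
 * consistency attributes above. hat, va, and pfn are hypothetical:
 *
 *	hat_devload(hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_MERGING_OK,
 *	    HAT_LOAD_LOCK);
 */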
/*
 * The following flag is applicable only on x86. It provides a hint to
 * hat_unload that it may optimize the number of cross-calls for performing
 * TLB invalidations by grouping multiple translations and sending a single
 * cross-call to invalidate all of them in one shot. Note that using this
 * optimization may have dangerous consequences if there is a chance that
 * the physical page may be re-used for something else before we perform
 * the TLB invalidations. So, this flag should be used with caution, only
 * in those cases where the caller has made other arrangements to prevent
 * the identity of the underlying physical page from changing underneath.
 * For example, if we know that the caller has acquired the SE_EXCL lock
 * over each of the underlying physical pages before calling hat_unload,
 * then there is no chance that the identity of the physical pages may
 * change before we return from hat_unload. In such a case, it is safe to
 * pass this flag to hat_unload.
 *
 * Providing this additional hint is not needed on SPARC. The SPARC hat
 * always optimizes the number of cross-calls by grouping multiple
 * translations. But passing this flag doesn't cause any harm on SPARC, as
 * it will just be ignored. So, wherever we use this flag, we may pass it
 * on both SPARC and x86 without any #ifdef guard statements.
 */
#define	HAT_UNLOAD_GRP_TLB_INVAL	0x20

/*
 * SPARC-specific. Used internally as a hint to skip xt_sync().
 */
#define	HAT_UNLOAD_NO_XT_SYNC	0x40

/*
 * Flags for hat_pagesync, hat_getstat, hat_sync
 */
#define	HAT_SYNC_DONTZERO	0x00
#define	HAT_SYNC_ZERORM		0x01
/* Additional flags for hat_pagesync */
#define	HAT_SYNC_STOPON_REF	0x02
#define	HAT_SYNC_STOPON_MOD	0x04
#define	HAT_SYNC_STOPON_RM	(HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
#define	HAT_SYNC_STOPON_SHARED	0x08

/*
 * Flags for hat_dup
 *
 * HAT_DUP_ALL	dup entire address space
 * HAT_DUP_COW	dup plus hat_clrattr(..PROT_WRITE) on newas
 */
#define	HAT_DUP_ALL		1
#define	HAT_DUP_COW		2
#define	HAT_DUP_SRD		3

/*
 * Flags for hat_map
 */
#define	HAT_MAP			0x00

/*
 * Flags for hat_pageunload
 */
#define	HAT_ADV_PGUNLOAD	0x00
#define	HAT_FORCE_PGUNLOAD	0x01

/*
 * Attributes for hat_page_*attr, hat_setstats and
 * returned by hat_pagesync.
 */
#define	P_MOD	0x1		/* the modified bit */
#define	P_REF	0x2		/* the referenced bit */
#define	P_RO	0x4		/* Read only page */
#define	P_NSH	0x8		/* Not to shuffle v_pages */

#define	hat_ismod(pp)		(hat_page_getattr(pp, P_MOD))
#define	hat_isref(pp)		(hat_page_getattr(pp, P_REF))
#define	hat_isro(pp)		(hat_page_getattr(pp, P_RO))

#define	hat_setmod(pp)		(hat_page_setattr(pp, P_MOD))
#define	hat_setmod_only(pp)	(hat_page_setattr(pp, P_MOD|P_NSH))
#define	hat_setref(pp)		(hat_page_setattr(pp, P_REF))
#define	hat_setrefmod(pp)	(hat_page_setattr(pp, P_REF|P_MOD))

#define	hat_clrmod(pp)		(hat_page_clrattr(pp, P_MOD))
#define	hat_clrref(pp)		(hat_page_clrattr(pp, P_REF))
#define	hat_clrrefmod(pp)	(hat_page_clrattr(pp, P_REF|P_MOD))

#define	hat_page_is_mapped(pp)	(hat_page_checkshare(pp, 0))

/*
 * hat_setup is being used in sparc/os/sundep.c
 */
void	hat_setup(struct hat *, int);

/*
 * Flags for hat_setup
 */
#define	HAT_DONTALLOC		0
#define	HAT_ALLOC		1
#define	HAT_INIT		2

/*
 * Other routines, for statistics
 */
int	hat_startstat(struct as *);
void	hat_getstat(struct as *, caddr_t, size_t, uint_t, char *, int);
void	hat_freestat(struct as *, int);
void	hat_resvstat(size_t, struct as *, caddr_t);
boolean_t hat_is_hatid(struct as *, uint_t);
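/*
 * Example: a minimal writeback-path sketch consulting and clearing the
 * per-page ref/mod attributes through the macros above. pp is
 * hypothetical:
 *
 *	if (hat_ismod(pp)) {
 *		... schedule the page for writeback ...
 *		hat_clrrefmod(pp);	(start a fresh ref/mod interval)
 *	}
 */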
/*
 * Some platforms provide HW features that use some number of the high
 * bits of a VA for alternate purposes such as data corruption detection.
 * hat_normalize_va() is used to normalize such VAs before presenting
 * them to parts of the kernel that do not have knowledge of the features.
 */
caddr_t	hat_normalize_va(struct hat *, caddr_t);
caddr_t	hat_normalize_va_checkadi(struct as *, caddr_t, kthread_t *);

#endif	/* _KERNEL */

/*
 * The size of the bit array for ref and mod bit storage must be a power
 * of 2. 2 bits are collected for each page. Below, the power used is 4:
 * 16 8-bit characters = 128 bits, i.e. ref and mod bit information
 * for 64 pages.
 */
#define	HRM_SHIFT		4
#define	HRM_BYTES		(1 << HRM_SHIFT)
#define	HRM_PAGES		((HRM_BYTES * NBBY) / 2)
#define	HRM_PGPERBYTE		(NBBY/2)
#define	HRM_PGBYTEMASK		(HRM_PGPERBYTE-1)

#define	HRM_PGOFFMASK		((HRM_PGPERBYTE-1) << MMU_PAGESHIFT)
#define	HRM_BASEOFFSET		(((MMU_PAGESIZE * HRM_PAGES) - 1))
#define	HRM_BASEMASK		(~(HRM_BASEOFFSET))

#define	HRM_BASESHIFT		(MMU_PAGESHIFT + (HRM_SHIFT + 2))
#define	HRM_PAGEMASK		(MMU_PAGEMASK ^ HRM_BASEMASK)

#define	HRM_HASHSIZE		0x200
#define	HRM_HASHMASK		(HRM_HASHSIZE - 1)

#define	HRM_BLIST_INCR		0x200

/*
 * The structure for maintaining referenced and modified information
 */
typedef struct hrmstat {
	struct as	*hrm_as;	/* stat block belongs to this as */
	uintptr_t	hrm_base;	/* base of block */
	ushort_t	hrm_id;		/* opaque identifier, one of a_vbits */
	struct hrmstat	*hrm_anext;	/* as statistics block list */
	struct hrmstat	*hrm_hnext;	/* list for hashed blocks */
	uchar_t		hrm_bits[HRM_BYTES]; /* the ref and mod bits */
} hrmstat_t;

typedef struct hrmbucket {
	kmutex_t	hb_lock;
	hrmstat_t	*hb_chain;
} hrmbucket_t;

/*
 * For global monitoring of the reference and modified bits
 * of all address spaces we reserve one id bit.
 */
#define	HRM_SWSMONID	1

#ifdef	_KERNEL

/*
 * Hat locking functions
 * XXX - these two functions are currently being used by hatstats;
 * they can be removed by using a per-as mutex for hatstats.
 */
void	hat_enter(struct hat *);
void	hat_exit(struct hat *);

typedef void (*hat_rgn_cb_func_t)(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t, void *);

void	hat_join_srd(struct hat *, vnode_t *);

hat_region_cookie_t hat_join_region(struct hat *, caddr_t, size_t, void *,
    u_offset_t, uchar_t, uchar_t, hat_rgn_cb_func_t, uint_t);
void	hat_leave_region(struct hat *, hat_region_cookie_t, uint_t);
void	hat_dup_region(struct hat *, hat_region_cookie_t);

#define	HAT_INVALID_REGION_COOKIE	((hat_region_cookie_t)-1)
#define	HAT_IS_REGION_COOKIE_VALID(c)	((c) != HAT_INVALID_REGION_COOKIE)

/* hat_join_region() flags */
#define	HAT_REGION_TEXT	0x1	/* passed by segvn */
#define	HAT_REGION_ISM	0x2	/* for hat_share()/hat_unshare() */
#define	HAT_REGION_TYPE_MASK	(0x7)

/*
 * VA mask related interfaces.
 */
int	hat_va_mask_get(struct hat *);
int	hat_va_mask_set(struct hat *, int);
void	hat_va_mask_set_exec(struct hat *, int);

#ifdef	__amd64
/*
 * SKA interfaces.
 */
void	hat_ska_map(void *, size_t);
void	hat_ska_unmap(void *, size_t);
#endif /* __amd64 */

#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_HAT_H */