/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 */

#include <sys/queue.h>
#include <machine/cpu_capabilities.h>

/*
 * There should be better APIs to describe the shared region.
 * For now, some hackery.
 */

#include <mach/shared_region.h>

static __inline boolean_t
in_shared_region(mach_vm_address_t addr)
{
    const mach_vm_address_t base = SHARED_REGION_BASE;
    const mach_vm_address_t size = SHARED_REGION_SIZE;
    return addr >= base && addr < (base + size);
}

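/*
 * Note: new_region() below applies this test to the start address of
 * every mapping; anything beginning inside [SHARED_REGION_BASE,
 * SHARED_REGION_BASE + SHARED_REGION_SIZE) is treated as part of the
 * shared region that hosts the dyld shared cache.
 */
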
/*
 * On both x64 and arm, there's a globally-shared
 * read-only page at _COMM_PAGE_START_ADDRESS
 * which low-level library routines reference.
 *
 * On x64, somewhere randomly chosen between _COMM_PAGE_TEXT_ADDRESS
 * and the top of the user address space, there's the
 * pre-emption-free-zone read-execute page.
 */

#include <System/machine/cpu_capabilities.h>

static __inline boolean_t
in_comm_region(const mach_vm_address_t addr, const vm_region_submap_info_data_64_t *info)
{
    return addr >= _COMM_PAGE_START_ADDRESS &&
        SM_TRUESHARED == info->share_mode &&
        VM_INHERIT_SHARE == info->inheritance &&
        !info->external_pager && (info->max_protection & VM_PROT_WRITE) == 0;
}

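/*
 * ZFOD: zero-fill-on-demand memory, i.e. anonymous mappings that have
 * never been touched.  The predicate below accepts a region only if it
 * has no backing object, no pager, and no resident, dirty, or
 * swapped-out pages, so its entire content is known to be zero.
 */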
static __inline boolean_t
in_zfod_region(const vm_region_submap_info_data_64_t *info)
{
    return info->share_mode == SM_EMPTY && !info->is_submap &&
        0 == info->object_id && !info->external_pager &&
        0 == info->pages_dirtied + info->pages_resident + info->pages_swapped_out;
}

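/*
 * Allocate a descriptor for the mapping [vmaddr, vmaddr + vmsize) and
 * classify it (shared region / comm region / ZFOD) so that the
 * appropriate operations vector can be attached.
 */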
static struct region *
new_region(mach_vm_offset_t vmaddr, mach_vm_size_t vmsize, const vm_region_submap_info_data_64_t *infop)
{
    struct region *r = calloc(1, sizeof (*r));
    assert(vmaddr != 0 && vmsize != 0);
    R_SETADDR(r, vmaddr);
    R_SETSIZE(r, vmsize);
    r->r_info = *infop;
    r->r_purgable = VM_PURGABLE_DENY;
    r->r_insharedregion = in_shared_region(vmaddr);
    r->r_incommregion = in_comm_region(vmaddr, &r->r_info);
    r->r_inzfodregion = in_zfod_region(&r->r_info);

    if (r->r_inzfodregion)
        r->r_op = &zfod_ops;
    else
        r->r_op = &vanilla_ops;
    return r;
}

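/*
 * The del_*_region() destructors poison the structures they free with
 * distinct 0xdeadbeeX patterns so that a stale pointer to a deleted
 * region is easy to recognize in a debugger.
 */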
static void
del_fileref_region(struct region *r)
{
    assert(&fileref_ops == r->r_op);
    /* r->r_fileref->fr_libent is a reference into the name table */
    poison(r->r_fileref, 0xdeadbee9, sizeof (*r->r_fileref));
    free(r->r_fileref);
    poison(r, 0xdeadbeeb, sizeof (*r));
    free(r);
}

static void
del_zfod_region(struct region *r)
{
    assert(&zfod_ops == r->r_op);
    assert(r->r_inzfodregion && 0 == r->r_nsubregions);
    assert(NULL == r->r_fileref);
    poison(r, 0xdeadbeed, sizeof (*r));
    free(r);
}

static void
del_vanilla_region(struct region *r)
{
    assert(&vanilla_ops == r->r_op);
    assert(!r->r_inzfodregion && 0 == r->r_nsubregions);
    assert(NULL == r->r_fileref);
    poison(r, 0xdeadbeef, sizeof (*r));
    free(r);
}

117 * "does any part of this address range match the tag?"
static bool
is_tagged(task_t task, mach_vm_offset_t addr, mach_vm_offset_t size, unsigned tag)
{
    mach_vm_offset_t vm_addr = addr;
    mach_vm_offset_t vm_size = 0;
    natural_t depth = 0;
    size_t pgsize = (1u << pageshift_host);

    do {
        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
        vm_region_submap_info_data_64_t info;

        kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

        if (KERN_FAILURE == ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            return false;
        } else if (KERN_INVALID_ADDRESS == ret) {
            err_mach(ret, NULL, "invalid address at %llx", vm_addr);
            return false;
        } else if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            return false;
        }

        if (info.is_submap) {
            depth++;
            continue;
        }
        if (info.user_tag == tag)
            return true;
        if (vm_addr + vm_size > addr + size)
            return false;
        vm_addr += pgsize;
    } while (1);
}

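/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *    if (is_tagged(task, R_ADDR(r), R_SIZE(r), VM_MEMORY_IOKIT))
 *        ;    // some page in r carries the IOKit tag
 *
 * Any of the VM_MEMORY_* tags from <mach/vm_statistics.h> can be used.
 */
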
STAILQ_HEAD(regionhead, region);
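/*
 * A regionhead is a singly-linked tail queue (<sys/queue.h>): regions
 * are appended with STAILQ_INSERT_TAIL as walk_regions() discovers
 * them, so the list is kept in ascending address order.
 */
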
/*
 * XXX Need something like mach_vm_shared_region_recurse()
 * to properly identify the shared region address ranges as
 * they evolve.
 */
static int
walk_regions(task_t task, struct regionhead *rhead)
{
    mach_vm_offset_t vm_addr = MACH_VM_MIN_ADDRESS;
    natural_t depth = 0;

    if (OPTIONS_DEBUG(opt, 3)) {
        printf("Building raw region list\n");
        print_memory_region_header();
    }
    while (1) {
        vm_region_submap_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
        mach_vm_size_t vm_size;

        kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

        if (KERN_FAILURE == ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            goto bad;
        } else if (KERN_INVALID_ADDRESS == ret) {
            break;  /* loop termination */
        } else if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
            goto bad;
        }

        if (OPTIONS_DEBUG(opt, 3)) {
            struct region *d = new_region(vm_addr, vm_size, &info);
            ROP_PRINT(d);
            del_region(d);
        }

        if (info.is_submap) {
            /* We also want to see submaps -- for debugging purposes. */
            struct region *r = new_region(vm_addr, vm_size, &info);
            r->r_depth = depth;
            STAILQ_INSERT_TAIL(rhead, r, r_linkage);
            depth++;
            continue;
        }

        if (VM_MEMORY_IOKIT == info.user_tag) {
            vm_addr += vm_size;
            continue; // ignore immediately: IO memory has side-effects
        }

        struct region *r = new_region(vm_addr, vm_size, &info);

        /* grab the page info of the first page in the mapping */

        mach_msg_type_number_t pageinfoCount = VM_PAGE_INFO_BASIC_COUNT;
        ret = mach_vm_page_info(task, R_ADDR(r), VM_PAGE_INFO_BASIC, (vm_page_info_t)&r->r_pageinfo, &pageinfoCount);
        if (KERN_SUCCESS != ret)
            err_mach(ret, r, "getting pageinfo at %llx", R_ADDR(r));

        /* record the purgability */

        ret = mach_vm_purgable_control(task, vm_addr, VM_PURGABLE_GET_STATE, &r->r_purgable);
        if (KERN_SUCCESS != ret)
            r->r_purgable = VM_PURGABLE_DENY;

        STAILQ_INSERT_TAIL(rhead, r, r_linkage);

        vm_addr += vm_size;
    }

    return 0;
bad:
    return EX_OSERR;
}

void
del_region_list(struct regionhead *rhead)
{
    struct region *r, *t;

    STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
        STAILQ_REMOVE(rhead, r, region, r_linkage);
        del_region(r);
    }
    free(rhead);
}

struct regionhead *
build_region_list(task_t task)
{
    struct regionhead *rhead = malloc(sizeof (*rhead));
    STAILQ_INIT(rhead);
    if (0 != walk_regions(task, rhead)) {
        del_region_list(rhead);
        return NULL;
    }
    return rhead;
}

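/*
 * Typical lifecycle (sketch):
 *
 *    struct regionhead *rhead = build_region_list(task);
 *    walk_region_list(rhead, region_print_memory, NULL);
 *    del_region_list(rhead);
 */
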
void
walk_region_list(struct regionhead *rhead, walk_region_cbfn_t cbfn, void *arg)
{
    struct region *r, *t;

    STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
        switch (cbfn(r, arg)) {
        case WALK_CONTINUE:
            break;
        case WALK_DELETE_REGION:
            STAILQ_REMOVE(rhead, r, region, r_linkage);
            del_region(r);
            break;
        case WALK_TERMINATE:
            return;
        case WALK_ERROR:
            return;
        }
    }
}

void
setpageshift(void)
{
    if (0 == pageshift_host) {
        vm_size_t hps = 0;
        kern_return_t ret = host_page_size(MACH_PORT_NULL, &hps);
        if (KERN_SUCCESS != ret || hps == 0)
            err_mach(ret, NULL, "host page size");
        int pshift = 0;
        while (((vm_offset_t)1 << pshift) != hps)
            pshift++;
        pageshift_host = pshift;
    }
    if (OPTIONS_DEBUG(opt, 3))
        printf("host page size: %lu\n", 1ul << pageshift_host);

    if (0 == pageshift_app) {
        size_t psz = getpagesize();
        int pshift = 0;
        while ((1ul << pshift) != psz)
            pshift++;
        pageshift_app = pshift;
    }
    if (OPTIONS_DEBUG(opt, 3) && pageshift_app != pageshift_host)
        printf("app page size: %lu\n", 1ul << pageshift_app);
}

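/*
 * Example: with 16 KiB host pages, host_page_size() yields 16384 and
 * the loop leaves pageshift_host == 14 (1u << 14 == 16384).  The two
 * shifts differ when a 4 KiB-page process runs on a 16 KiB-page
 * kernel, as can happen on arm64.
 */
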
void
print_memory_region_header(void)
{
    printf("%-33s %c %-7s %-7s %8s %16s ",
        "Address Range", 'S', "Size", "Cur/Max", "Obj32", "FirstPgObjectID");
    printf("%9s %-3s %-11s %5s ",
        "Offset", "Tag", "Mode", "Refc");
    printf("%5s ", "Depth");
    printf("%5s %5s %5s %3s ",
        "Res", "SNP", "Dirty", "Pgr");
    printf("\n");
}

static char
region_type(const struct region *r)
{
    if (r->r_inzfodregion)
        return 'z';
    if (r->r_incommregion)
        return 'c';
    if (r->r_insharedregion)
        return 's';
    return ' ';
}

void
print_memory_region(const struct region *r)
{
    hsize_str_t hstr;
    tag_str_t tstr;

    printf("%016llx-%016llx %c %-7s %s/%s %8x %16llx ",
        R_ADDR(r), R_ENDADDR(r), region_type(r),
        str_hsize(hstr, R_SIZE(r)),
        str_prot(r->r_info.protection),
        str_prot(r->r_info.max_protection),
        r->r_info.object_id, r->r_pageinfo.object_id);

    printf("%9lld %3d %-11s %5u ",
        r->r_info.external_pager ?
        r->r_pageinfo.offset : r->r_info.offset,
        r->r_info.user_tag,
        str_shared(r->r_info.share_mode),
        r->r_info.ref_count);

    printf("%5u ", r->r_depth);

    if (!r->r_info.is_submap) {
        printf("%5u %5u %5u %3s ",
            r->r_info.pages_resident,
            r->r_info.pages_shared_now_private,
            r->r_info.pages_dirtied,
            r->r_info.external_pager ? "ext" : "");
        if (r->r_fileref)
            printf("\n %s at %lld ",
                r->r_fileref->fr_pathname,
                r->r_fileref->fr_offset);
        else
            printf("%s", str_tagr(tstr, r));
        printf("\n");
        if (r->r_nsubregions) {
            printf(" %-33s %7s %12s\t%s\n",
                "Address Range", "Size", "Type(s)", "Filename(s)");
            for (unsigned i = 0; i < r->r_nsubregions; i++) {
                struct subregion *s = r->r_subregions[i];
                printf(" %016llx-%016llx %7s %12s\t%s\n",
                    S_ADDR(s), S_ENDADDR(s),
                    str_hsize(hstr, S_SIZE(s)),
                    S_MACHO_TYPE(s),
                    S_FILENAME(s));
            }
        }
    } else {
        printf("%5s %5s %5s %3s %s\n", "", "", "", "", str_tagr(tstr, r));
    }
}

walk_return_t
region_print_memory(struct region *r, __unused void *arg)
{
    ROP_PRINT(r);
    return WALK_CONTINUE;
}

void
print_one_memory_region(const struct region *r)
{
    print_memory_region_header();
    ROP_PRINT(r);
}

/*
 * The reported size of a mapping to a file object gleaned from
 * mach_vm_region_recurse() can exceed the underlying size of the file.
 * If we attempt to write out the full reported size, we either fail
 * with EFAULT or, if we compress it, die with SIGBUS.
 *
 * See rdar://23744374
 *
 * Figure out what the "non-faulting" size of the object is to
 * *host* page size resolution.
 */
static boolean_t
is_actual_size(const task_t task, const struct region *r, mach_vm_size_t *hostvmsize)
{
    if (!r->r_info.external_pager ||
        (r->r_info.max_protection & VM_PROT_READ) == VM_PROT_NONE)
        return TRUE;

    const size_t pagesize_host = 1ul << pageshift_host;
    const unsigned filepages = r->r_info.pages_resident +
        r->r_info.pages_swapped_out;

    if (pagesize_host * filepages == R_SIZE(r))
        return TRUE;

    /*
     * Verify that the last couple of host-pagesize pages
     * of a file-backed mapping are actually pageable in the
     * underlying object by walking backwards from the end
     * of the application-pagesize mapping.
     */
    *hostvmsize = R_SIZE(r);

    const long npagemax = 1ul << (pageshift_app - pageshift_host);
    for (long npage = 0; npage < npagemax; npage++) {

        const mach_vm_address_t taddress =
            R_ENDADDR(r) - pagesize_host * (npage + 1);
        if (taddress < R_ADDR(r) || taddress >= R_ENDADDR(r))
            break;

        mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
        vm_page_info_basic_data_t pInfo;

        kern_return_t ret = mach_vm_page_info(task, taddress, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
        if (KERN_SUCCESS != ret) {
            err_mach(ret, NULL, "getting pageinfo at %llx", taddress);
            break;
        }

        /*
         * If this page has been in memory before, assume it can
         * be brought back again.
         */
        if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY | VM_PAGE_QUERY_PAGE_PAGED_OUT))
            continue;

        /*
         * Force the page to be fetched to see if it faults.
         */
        mach_vm_size_t tsize = 1ul << pageshift_host;
        void *tmp = valloc((size_t)tsize);
        const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;

        switch (ret = mach_vm_read_overwrite(task,
            taddress, tsize, vtmp, &tsize)) {
        case KERN_INVALID_ADDRESS:
            *hostvmsize = taddress - R_ADDR(r);
            break;
        case KERN_SUCCESS:
            break;
        default:
            err_mach(ret, NULL, "mach_vm_overwrite()");
            break;
        }
        free(tmp);
    }
    return R_SIZE(r) == *hostvmsize;
}
