/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 */

#include "options.h"
#include "vm.h"
#include "utils.h"
#include "region.h"
#include "sparse.h"

#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>
#include <unistd.h>
#include <stdbool.h>
#include <assert.h>
#include <sys/queue.h>
#include <machine/cpu_capabilities.h>

/*
 * There should be better APIs to describe the shared region
 * For now, some hackery.
 */

#include <mach/shared_region.h>

static __inline boolean_t
in_shared_region(mach_vm_address_t addr)
{
	const mach_vm_address_t base = SHARED_REGION_BASE;
	const mach_vm_address_t size = SHARED_REGION_SIZE;
	return addr >= base && addr < (base + size);
}
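
#if 0	/* illustrative sketch (not compiled): the shared region is where the
	   dyld shared cache is mapped, so the base address itself must test
	   true and the first address past the region must test false; these
	   statements would live inside a function */
	assert(in_shared_region(SHARED_REGION_BASE));
	assert(!in_shared_region(SHARED_REGION_BASE + SHARED_REGION_SIZE));
#endif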

/*
 * On both x64 and arm, there's a globally-shared
 * read-only page at _COMM_PAGE_START_ADDRESS
 * which low-level library routines reference.
 *
 * On x64, somewhere randomly chosen between _COMM_PAGE_TEXT_ADDRESS
 * and the top of the user address space, there's the
 * pre-emption-free-zone read-execute page.
 */

#include <System/machine/cpu_capabilities.h>

static __inline boolean_t
in_comm_region(const mach_vm_address_t addr, const vm_region_submap_info_data_64_t *info)
{
	return addr >= _COMM_PAGE_START_ADDRESS &&
	    SM_TRUESHARED == info->share_mode &&
	    VM_INHERIT_SHARE == info->inheritance &&
	    !info->external_pager && (info->max_protection & VM_PROT_WRITE) == 0;
}
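
#if 0	/* illustrative sketch (not compiled): classify the current task's own
	   comm page -- mach_vm_region_recurse() fills in the submap info here
	   exactly as walk_regions() does below */
	mach_vm_address_t ca = _COMM_PAGE_START_ADDRESS;
	mach_vm_size_t csize = 0;
	natural_t cdepth = 0;
	mach_msg_type_number_t ccount = VM_REGION_SUBMAP_INFO_COUNT_64;
	vm_region_submap_info_data_64_t cinfo;

	if (KERN_SUCCESS == mach_vm_region_recurse(mach_task_self(), &ca, &csize,
	    &cdepth, (vm_region_recurse_info_t)&cinfo, &ccount) &&
	    in_comm_region(_COMM_PAGE_START_ADDRESS, &cinfo))
		printf("comm page found at %llx\n", (unsigned long long)ca);
#endif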

static __inline boolean_t
in_zfod_region(const vm_region_submap_info_data_64_t *info)
{
	return info->share_mode == SM_EMPTY && !info->is_submap &&
	    0 == info->object_id && !info->external_pager &&
	    0 == info->pages_dirtied + info->pages_resident + info->pages_swapped_out;
}
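
/*
 * ZFOD ("zero-fill-on-demand") regions matched above are anonymous
 * mappings that have never been touched: no resident, dirty, or
 * swapped-out pages, and no backing object.  new_region() below hangs
 * zfod_ops on them so that (per the ops split) their contents can be
 * synthesized as zeroes rather than read out of the target task.
 */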

static struct region *
new_region(mach_vm_offset_t vmaddr, mach_vm_size_t vmsize, const vm_region_submap_info_data_64_t *infop)
{
	struct region *r = calloc(1, sizeof (*r));
	assert(vmaddr != 0 && vmsize != 0);
	R_SETADDR(r, vmaddr);
	R_SETSIZE(r, vmsize);
	r->r_info = *infop;
	r->r_purgable = VM_PURGABLE_DENY;
	r->r_insharedregion = in_shared_region(vmaddr);
	r->r_incommregion = in_comm_region(vmaddr, &r->r_info);
	r->r_inzfodregion = in_zfod_region(&r->r_info);

	if (r->r_inzfodregion)
		r->r_op = &zfod_ops;
	else
		r->r_op = &vanilla_ops;
	return r;
}

void
del_fileref_region(struct region *r)
{
	assert(&fileref_ops == r->r_op);
	/* r->r_fileref->fr_libent is a reference into the name table */
	poison(r->r_fileref, 0xdeadbee9, sizeof (*r->r_fileref));
	free(r->r_fileref);
	poison(r, 0xdeadbeeb, sizeof (*r));
	free(r);
}

void
del_zfod_region(struct region *r)
{
	assert(&zfod_ops == r->r_op);
	assert(r->r_inzfodregion && 0 == r->r_nsubregions);
	assert(NULL == r->r_fileref);
	poison(r, 0xdeadbeed, sizeof (*r));
	free(r);
}

void
del_vanilla_region(struct region *r)
{
	assert(&vanilla_ops == r->r_op);
	assert(!r->r_inzfodregion && 0 == r->r_nsubregions);
	assert(NULL == r->r_fileref);
	poison(r, 0xdeadbeef, sizeof (*r));
	free(r);
}

/*
 * "does any part of this address range match the tag?"
 */
int
is_tagged(task_t task, mach_vm_offset_t addr, mach_vm_offset_t size, unsigned tag)
{
	mach_vm_offset_t vm_addr = addr;
	mach_vm_offset_t vm_size = 0;
	natural_t depth = 0;
	size_t pgsize = (1u << pageshift_host);

	do {
		mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
		vm_region_submap_info_data_64_t info;

		kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

		if (KERN_FAILURE == ret) {
			err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
			return -1;
		} else if (KERN_INVALID_ADDRESS == ret) {
			err_mach(ret, NULL, "invalid address at %llx", vm_addr);
			return -1;
		} else if (KERN_SUCCESS != ret) {
			err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
			return -1;
		}
		if (info.is_submap) {
			depth++;
			continue;
		}
		if (info.user_tag == tag)
			return 1;
		if (vm_addr + vm_size > addr + size)
			return 0;
		vm_addr += pgsize;
	} while (1);
}
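
#if 0	/* illustrative sketch (not compiled): "task" is assumed to be a task
	   port obtained elsewhere (e.g. via task_for_pid()); VM_MEMORY_MALLOC
	   is one of the user tags defined in <mach/vm_statistics.h> */
	if (1 == is_tagged(task, MACH_VM_MIN_ADDRESS, 0x100000000, VM_MEMORY_MALLOC))
		printf("range contains malloc-tagged memory\n");
#endif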

STAILQ_HEAD(regionhead, region);

/*
 * XXX Need something like mach_vm_shared_region_recurse()
 * to properly identify the shared region address ranges as
 * we go.
 */

static int
walk_regions(task_t task, struct regionhead *rhead)
{
	mach_vm_offset_t vm_addr = MACH_VM_MIN_ADDRESS;
	natural_t depth = 0;

	if (OPTIONS_DEBUG(opt, 3)) {
		printf("Building raw region list\n");
		print_memory_region_header();
	}
	while (1) {
		vm_region_submap_info_data_64_t info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
		mach_vm_size_t vm_size;

		kern_return_t ret = mach_vm_region_recurse(task, &vm_addr, &vm_size, &depth, (vm_region_recurse_info_t)&info, &count);

		if (KERN_FAILURE == ret) {
			err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
			goto bad;
		} else if (KERN_INVALID_ADDRESS == ret) {
			break;	/* loop termination */
		} else if (KERN_SUCCESS != ret) {
			err_mach(ret, NULL, "error inspecting task at %llx", vm_addr);
			goto bad;
		}

		if (OPTIONS_DEBUG(opt, 3)) {
			struct region *d = new_region(vm_addr, vm_size, &info);
			ROP_PRINT(d);
			ROP_DELETE(d);
		}

		if (info.is_submap) {
#ifdef CONFIG_SUBMAP
			/* We also want to see submaps -- for debugging purposes. */
			struct region *r = new_region(vm_addr, vm_size, &info);
			r->r_depth = depth;
			STAILQ_INSERT_TAIL(rhead, r, r_linkage);
#endif
			depth++;
			continue;
		}

		if (VM_MEMORY_IOKIT == info.user_tag) {
			vm_addr += vm_size;
			continue; // ignore immediately: IO memory has side-effects
		}

		struct region *r = new_region(vm_addr, vm_size, &info);
#ifdef CONFIG_SUBMAP
		r->r_depth = depth;
#endif
		/* grab the page info of the first page in the mapping */

		mach_msg_type_number_t pageinfoCount = VM_PAGE_INFO_BASIC_COUNT;
		ret = mach_vm_page_info(task, R_ADDR(r), VM_PAGE_INFO_BASIC, (vm_page_info_t)&r->r_pageinfo, &pageinfoCount);
		if (KERN_SUCCESS != ret)
			err_mach(ret, r, "getting pageinfo at %llx", R_ADDR(r));

		/* record the purgability */

		ret = mach_vm_purgable_control(task, vm_addr, VM_PURGABLE_GET_STATE, &r->r_purgable);
		if (KERN_SUCCESS != ret)
			r->r_purgable = VM_PURGABLE_DENY;

		STAILQ_INSERT_TAIL(rhead, r, r_linkage);

		vm_addr += vm_size;
	}

	return 0;
bad:
	return EX_OSERR;
}

void
del_region_list(struct regionhead *rhead)
{
	struct region *r, *t;

	STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
		STAILQ_REMOVE(rhead, r, region, r_linkage);
		ROP_DELETE(r);
	}
	free(rhead);
}

struct regionhead *
build_region_list(task_t task)
{
	struct regionhead *rhead = malloc(sizeof (*rhead));
	STAILQ_INIT(rhead);
	if (0 != walk_regions(task, rhead)) {
		del_region_list(rhead);
		return NULL;
	}
	return rhead;
}

int
walk_region_list(struct regionhead *rhead, walk_region_cbfn_t cbfn, void *arg)
{
	struct region *r, *t;

	STAILQ_FOREACH_SAFE(r, rhead, r_linkage, t) {
		switch (cbfn(r, arg)) {
		case WALK_CONTINUE:
			break;
		case WALK_DELETE_REGION:
			STAILQ_REMOVE(rhead, r, region, r_linkage);
			ROP_DELETE(r);
			break;
		case WALK_TERMINATE:
			goto done;
		case WALK_ERROR:
			return -1;
		}
	}
done:
	return 0;
}
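
#if 0	/* illustrative sketch (not compiled): the intended lifecycle of a
	   region list -- build it, walk it with a callback, tear it down */
static walk_return_t
count_one(__unused struct region *r, void *arg)
{
	(*(unsigned *)arg)++;
	return WALK_CONTINUE;
}

static void
count_task_regions(task_t task)
{
	unsigned n = 0;
	struct regionhead *rhead = build_region_list(task);
	if (NULL != rhead) {
		walk_region_list(rhead, count_one, &n);
		printf("%u memory regions\n", n);
		del_region_list(rhead);
	}
}
#endif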

int pageshift_host;
int pageshift_app;

void
setpageshift(void)
{
	if (0 == pageshift_host) {
		vm_size_t hps = 0;
		kern_return_t ret = host_page_size(mach_host_self(), &hps);
		if (KERN_SUCCESS != ret || hps == 0)
			err_mach(ret, NULL, "host page size");
		int pshift = 0;
		while (((vm_offset_t)1 << pshift) != hps)
			pshift++;
		pageshift_host = pshift;
	}
	if (OPTIONS_DEBUG(opt, 3))
		printf("host page size: %lu\n", 1ul << pageshift_host);

	if (0 == pageshift_app) {
		size_t psz = getpagesize();
		int pshift = 0;
		while ((1ul << pshift) != psz)
			pshift++;
		pageshift_app = pshift;
	}
	if (OPTIONS_DEBUG(opt, 3) && pageshift_app != pageshift_host)
		printf("app page size: %lu\n", 1ul << pageshift_app);
}
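
/*
 * Note: the two shifts can legitimately differ -- e.g. a process using
 * 4 KiB pages (pageshift_app == 12) under a kernel managing 16 KiB
 * pages (pageshift_host == 14) -- which is why the host and app page
 * sizes are tracked separately.  (The example values are illustrative.)
 */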

void
print_memory_region_header(void)
{
	printf("%-33s %c %-7s %-7s %8s %16s ",
	    "Address Range", 'S', "Size", "Cur/Max", "Obj32", "FirstPgObjectID");
	printf("%9s %-3s %-11s %5s ",
	    "Offset", "Tag", "Mode", "Refc");
#ifdef CONFIG_SUBMAP
	printf("%5s ", "Depth");
#endif
	printf("%5s %5s %5s %3s ",
	    "Res", "SNP", "Dirty", "Pgr");
	printf("\n");
}

static __inline char
region_type(const struct region *r)
{
	if (r->r_fileref)
		return 'f';
	if (r->r_inzfodregion)
		return 'z';
	if (r->r_incommregion)
		return 'c';
	if (r->r_insharedregion)
		return 's';
	return ' ';
}

void
print_memory_region(const struct region *r)
{
	hsize_str_t hstr;
	tag_str_t tstr;

	printf("%016llx-%016llx %c %-7s %s/%s %8x %16llx ",
	    R_ADDR(r), R_ENDADDR(r), region_type(r),
	    str_hsize(hstr, R_SIZE(r)),
	    str_prot(r->r_info.protection),
	    str_prot(r->r_info.max_protection),
	    r->r_info.object_id, r->r_pageinfo.object_id
	);

	printf("%9lld %3d %-11s %5u ",
	    r->r_info.external_pager ?
	    r->r_pageinfo.offset : r->r_info.offset,
	    r->r_info.user_tag,
	    str_shared(r->r_info.share_mode),
	    r->r_info.ref_count
	);
#ifdef CONFIG_SUBMAP
	printf("%5u ", r->r_depth);
#endif

	if (!r->r_info.is_submap) {
		printf("%5u %5u %5u %3s ",
		    r->r_info.pages_resident,
		    r->r_info.pages_shared_now_private,
		    r->r_info.pages_dirtied,
		    r->r_info.external_pager ? "ext" : "");
		if (r->r_fileref)
			printf("\n    %s at %lld ",
			    r->r_fileref->fr_pathname,
			    r->r_fileref->fr_offset);
		else
			printf("%s", str_tagr(tstr, r));
		printf("\n");
		if (r->r_nsubregions) {
			printf("    %-33s %7s %12s\t%s\n",
			    "Address Range", "Size", "Type(s)", "Filename(s)");
			for (unsigned i = 0; i < r->r_nsubregions; i++) {
				struct subregion *s = r->r_subregions[i];
				printf("    %016llx-%016llx %7s %12s\t%s\n",
				    S_ADDR(s), S_ENDADDR(s),
				    str_hsize(hstr, S_SIZE(s)),
				    S_MACHO_TYPE(s),
				    S_FILENAME(s));
			}
		}
	} else {
		printf("%5s %5s %5s %3s %s\n", "", "", "", "", str_tagr(tstr, r));
	}
}

walk_return_t
region_print_memory(struct region *r, __unused void *arg)
{
	ROP_PRINT(r);
	return WALK_CONTINUE;
}

void
print_one_memory_region(const struct region *r)
{
	print_memory_region_header();
	ROP_PRINT(r);
}

#ifdef RDAR_23744374
/*
 * The reported size of a mapping to a file object gleaned from
 * mach_vm_region_recurse() can exceed the underlying size of the file.
 * If we attempt to write out the full reported size, we get an
 * error (EFAULT), or if we compress it, we die with SIGBUS.
 *
 * See rdar://23744374
 *
 * Figure out what the "non-faulting" size of the object is to
 * *host* page size resolution.
 */
bool
is_actual_size(const task_t task, const struct region *r, mach_vm_size_t *hostvmsize)
{
	if (!r->r_info.external_pager ||
	    (r->r_info.max_protection & VM_PROT_READ) == VM_PROT_NONE)
		return true;

	const size_t pagesize_host = 1ul << pageshift_host;
	const unsigned filepages = r->r_info.pages_resident +
	    r->r_info.pages_swapped_out;

	if (pagesize_host * filepages == R_SIZE(r))
		return true;

	/*
	 * Verify that the last couple of host-pagesize pages
	 * of a file-backed mapping are actually pageable in the
	 * underlying object by walking backwards from the end
	 * of the application-pagesize mapping.
	 */
	*hostvmsize = R_SIZE(r);

	const long npagemax = 1ul << (pageshift_app - pageshift_host);
	for (long npage = 0; npage < npagemax; npage++) {

		const mach_vm_address_t taddress =
		    R_ENDADDR(r) - pagesize_host * (npage + 1);
		if (taddress < R_ADDR(r) || taddress >= R_ENDADDR(r))
			break;

		mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
		vm_page_info_basic_data_t pInfo;

		kern_return_t ret = mach_vm_page_info(task, taddress, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
		if (KERN_SUCCESS != ret) {
			err_mach(ret, NULL, "getting pageinfo at %llx", taddress);
			break;	/* bail */
		}

		/*
		 * If this page has been in memory before, assume it can
		 * be brought back again.
		 */
		if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY | VM_PAGE_QUERY_PAGE_PAGED_OUT))
			continue;

		/*
		 * Force the page to be fetched to see if it faults.
		 */
		mach_vm_size_t tsize = 1ul << pageshift_host;
		void *tmp = valloc((size_t)tsize);
		const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;

		switch (ret = mach_vm_read_overwrite(task,
		    taddress, tsize, vtmp, &tsize)) {
		case KERN_INVALID_ADDRESS:
			*hostvmsize = taddress - R_ADDR(r);
			break;
		case KERN_SUCCESS:
			break;
		default:
			err_mach(ret, NULL, "mach_vm_read_overwrite()");
			break;
		}
		free(tmp);
	}
	return R_SIZE(r) == *hostvmsize;
}
#endif
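
#if 0	/* illustrative sketch (not compiled): how a caller might clamp a
	   file-backed region to its non-faulting size before writing it out --
	   "task" and "r" are assumed to be supplied by the surrounding code */
	mach_vm_size_t hostvmsize = R_SIZE(r);
	if (!is_actual_size(task, r, &hostvmsize))
		printf("%016llx-%016llx: only %llu bytes can be read safely\n",
		    R_ADDR(r), R_ENDADDR(r), (unsigned long long)hostvmsize);
#endif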