2 * Copyright (c) 2016 Apple Inc. All rights reserved.
/*
 * Fixed-size (512-element) description-string type for kernel objects.
 * NOTE(review): extraction artifact — shown element type is `char *`; the
 * canonical declaration in the original headers is typically
 * `typedef char kobject_description_t[512];` — confirm against the source tree.
 */
5 typedef char *kobject_description_t
[512];
16 #include <sys/types.h>
17 #include <sys/sysctl.h>
34 #include <mach/mach.h>
/*
 * Allocate and initialize a `struct subregion` describing the slice of a
 * Mach-O segment (`sc`, from library entry `le`) that lies at [vmaddr,
 * vmaddr+vmsize) in the target address space.
 *
 * NOTE(review): extraction artifact — the function-name line, most field
 * initializations, and the `return s;` are elided here; code tokens below are
 * kept byte-identical to the extracted fragment.
 */
36 static struct subregion
*
38 const mach_vm_offset_t vmaddr
,
39 const mach_vm_offset_t vmsize
,
40 const native_segment_command_t
*sc
,
41 const struct libent
*le
)
/* NOTE(review): malloc result is not NULL-checked (assert-heavy tool style). */
43 struct subregion
*s
= malloc(sizeof (*s
));
/* Reject degenerate subregions ... */
45 assert(vmaddr
!= 0 && vmsize
!= 0);
/* ... and address-range wraparound (vmaddr + vmsize must not overflow). */
46 assert(vmaddr
< vmaddr
+ vmsize
);
/* New subregions start without a UUID-based file reference. */
53 s
->s_isuuidref
= false;
/*
 * Destroy a subregion: scribble a poison pattern over it (use-after-free
 * detection) before releasing it.
 * NOTE(review): the release call (presumably free(s)) is elided from this
 * extraction; only the poison step is visible.
 */
58 del_subregion(struct subregion
*s
)
60 poison(s
, 0xfacefac1, sizeof (*s
));
/*
 * Release all subregions hung off region `r`: delete each element, poison
 * and free the pointer array, and reset r_subregions to NULL so the region
 * reverts to an undecorated state.
 * NOTE(review): the return statement/value is elided from this extraction.
 */
65 clean_subregions(struct region
*r
)
67 if (r
->r_nsubregions
) {
/* A nonzero count implies the array must exist. */
68 assert(r
->r_subregions
);
69 for (unsigned i
= 0; i
< r
->r_nsubregions
; i
++)
70 del_subregion(r
->r_subregions
[i
]);
/* Poison the (about-to-be-freed) pointer array itself, too. */
71 poison(r
->r_subregions
, 0xfac1fac1, sizeof (r
->r_subregions
[0]) * r
->r_nsubregions
);
72 free(r
->r_subregions
);
74 r
->r_subregions
= NULL
;
76 assert(NULL
== r
->r_subregions
);
/*
 * Destroy a sparse region: poison the structure before release.
 * NOTE(review): cleanup of subregions / free(r) is elided from this
 * extraction; only the poison step is visible.
 */
82 del_sparse_region(struct region
*r
)
85 poison(r
, 0xcafecaff, sizeof (*r
));
/*
 * NULLsc exists solely as a typed null pointer for sizeof on struct fields
 * (e.g. sizeof (NULLsc->segname)); it is never dereferenced at runtime.
 */
89 #define NULLsc ((native_segment_command_t *)0)

/*
 * True when two subregions carry the same Mach-O segment name, compared over
 * the full segname field width (16 bytes), so unterminated names compare
 * correctly.
 */
92 issamesubregiontype(const struct subregion
*s0
, const struct subregion
*s1
) {
93 return 0 == strncmp(S_MACHO_TYPE(s0
), S_MACHO_TYPE(s1
), sizeof (NULLsc
->segname
));
/*
 * True when subregion `s` has segment name `sctype` (e.g. SEG_TEXT),
 * bounded to the segname field width as in issamesubregiontype().
 */
97 issubregiontype(const struct subregion
*s
, const char *sctype
) {
98 return 0 == strncmp(S_MACHO_TYPE(s
), sctype
, sizeof (NULLsc
->segname
));
/*
 * Remove the subregion at index `ind` from region `r`: destroy it, shift the
 * remaining pointers down one slot, and shrink the count, NULLing the vacated
 * tail slot.
 */
102 elide_subregion(struct region
*r
, unsigned ind
)
104 del_subregion(r
->r_subregions
[ind
]);
/* Compact the array over the deleted slot. */
105 for (unsigned j
= ind
; j
< r
->r_nsubregions
- 1; j
++)
106 r
->r_subregions
[j
] = r
->r_subregions
[j
+1];
/*
 * NOTE(review): this assert runs after the loop above already computed
 * r_nsubregions - 1 (unsigned); a zero count would have wrapped there first.
 * Moving the assert before the loop would catch that earlier.
 */
107 assert(r
->r_nsubregions
!= 0);
108 r
->r_subregions
[--r
->r_nsubregions
] = NULL
;
/*
 * Intrusive singly-linked (STAILQ) list node used to accumulate subregions
 * in unsorted order before they are flattened into a region's sorted array.
 */
111 struct subregionlist
{
112 STAILQ_ENTRY(subregionlist
) srl_linkage
;
/* The payload: one subregion per list node. */
113 struct subregion
*srl_s
;
/* Anonymous STAILQ head type for lists of the above. */
115 typedef STAILQ_HEAD(, subregionlist
) subregionlisthead_t
;
/*
 * Walk the load commands of the in-memory Mach-O image `mh` (belonging to
 * library entry `le`) and, for every LC_SEGMENT that overlaps region `r`,
 * prepend a new subregion (trimmed to the region's bounds) onto list `srlh`.
 * Returns WALK_CONTINUE.
 *
 * NOTE(review): extraction artifact — the switch header, break/continue
 * lines, and the region-edge trimming statements are elided; code tokens are
 * kept byte-identical to the fragment.
 */
118 add_subregions_for_libent(
119 subregionlisthead_t
*srlh
,
120 const struct region
*r
,
121 const native_mach_header_t
*mh
,
122 const mach_vm_offset_t __unused mh_taddr
, // address in target
123 const struct libent
*le
)
/* Load commands start immediately after the mach header. */
125 const struct load_command
*lc
= (const void *)(mh
+ 1);
/* Slide/offset applied to segment vmaddrs for this object. */
126 mach_vm_offset_t objoff
= le
->le_objoff
;
127 for (unsigned n
= 0; n
< mh
->ncmds
; n
++) {
129 const native_segment_command_t
*sc
;
132 case NATIVE_LC_SEGMENT
:
133 sc
= (const void *)lc
;
/* Skip __PAGEZERO: it maps nothing of interest at vmaddr 0. */
135 if (0 == sc
->vmaddr
&& strcmp(sc
->segname
, SEG_PAGEZERO
) == 0)
/* Target-space extent of this segment after applying the object offset. */
137 mach_vm_offset_t lo
= sc
->vmaddr
+ objoff
;
138 mach_vm_offset_t hi
= lo
+ sc
->vmsize
;
140 /* Eliminate non-overlapping sections first */
142 if (R_ENDADDR(r
) - 1 < lo
)
144 if (hi
- 1 < R_ADDR(r
))
148 * Some part of this segment is in the region.
149 * Trim the edges in the case where we span regions.
153 if (hi
> R_ENDADDR(r
))
/* NOTE(review): calloc result used unchecked (assert-heavy tool style). */
156 struct subregionlist
*srl
= calloc(1, sizeof (*srl
));
157 struct subregion
*s
= new_subregion(lo
, hi
- lo
, sc
, le
);
158 assert(sc
->fileoff
>= 0);
/* Unsorted accumulation; caller sorts after flattening to an array. */
160 STAILQ_INSERT_HEAD(srlh
, srl
, srl_linkage
);
162 if (OPTIONS_DEBUG(opt
, 2)) {
164 printr(r
, "subregion %llx-%llx %7s %12s\t%s [%s off %lu for %lu nsects %u flags %x]\n",
165 S_ADDR(s
), S_ENDADDR(s
),
166 str_hsize(hstr
, S_SIZE(s
)),
169 str_prot(sc
->initprot
),
170 (unsigned long)sc
->fileoff
,
171 (unsigned long)sc
->filesize
,
172 sc
->nsects
, sc
->flags
);
/* Advance to the next load command by its recorded size. */
179 lc
= (const void *)((caddr_t
)lc
+ lc
->cmdsize
);
183 return WALK_CONTINUE
;
187 * Because we aggregate information from multiple sources, there may
188 * be duplicate subregions. Eliminate them here.
190 * Note that each library in the shared cache points
191 * separately at a single, unified (large!) __LINKEDIT section; these
192 * get removed here too.
194 * Assumes the subregion array is sorted by address!
/*
 * Scan adjacent pairs (s0 = [i-1], s1 = [i]) of the sorted subregion array
 * and elide exact duplicates. NOTE(review): the loop-index initialization,
 * the i++ advance for the non-duplicate cases, and the segment-command
 * mismatch warning path are elided from this extraction.
 */
197 eliminate_duplicate_subregions(struct region
*r
)
200 while (i
< r
->r_nsubregions
) {
201 struct subregion
*s0
= r
->r_subregions
[i
-1];
202 struct subregion
*s1
= r
->r_subregions
[i
];
/* Different address or size => not duplicates; (elided) advance. */
204 if (S_ADDR(s0
) != S_ADDR(s1
) || S_SIZE(s0
) != S_SIZE(s1
)) {
/* Same extent but differing segment commands: treated separately (elided). */
208 if (memcmp(&s0
->s_segcmd
, &s1
->s_segcmd
, sizeof (s0
->s_segcmd
)) != 0) {
212 if (OPTIONS_DEBUG(opt
, 3))
213 printr(r
, "eliding duplicate %s subregion (%llx-%llx) file %s\n",
214 S_MACHO_TYPE(s1
), S_ADDR(s1
), S_ENDADDR(s1
), S_FILENAME(s1
));
215 /* If the duplicate subregions aren't mapping the same file (?), forget the name */
216 if (s0
->s_libent
!= s1
->s_libent
)
217 s0
->s_libent
= s1
->s_libent
= NULL
;
/* Drop s1; note i is NOT advanced here, so the new [i] is re-examined. */
218 elide_subregion(r
, i
);
223 * See if any of the dyld information we have can better describe this
224 * region of the target address space.
/*
 * Region-walk callback: `arg` is a dyld_process_info. For each dyld image
 * overlapping region `r`, collect subregions from its load commands, then
 * sort/dedupe them onto the region, and classify file-reference-eligible
 * subregions. ZFOD and shared-common regions are skipped outright.
 *
 * NOTE(review): extraction artifact — loop-index init, several
 * break/continue/brace lines, and the error-path list cleanup body are
 * elided; code tokens kept byte-identical to the fragment.
 */
227 decorate_memory_region(struct region
*r
, void *arg
)
/* Nothing dyld can add to zero-fill or shared-common regions. */
229 if (r
->r_inzfodregion
|| r
->r_incommregion
)
230 return WALK_CONTINUE
;
232 const dyld_process_info dpi
= arg
;
/* __block: mutated inside the enumeration block below. */
234 __block walk_return_t retval
= WALK_CONTINUE
;
235 __block subregionlisthead_t srlhead
= STAILQ_HEAD_INITIALIZER(srlhead
);
237 _dyld_process_info_for_each_image(dpi
, ^(uint64_t __unused mhaddr
, const uuid_t uuid
, __unused
const char *path
) {
/* Once an earlier image produced an error, skip the rest. */
238 if (WALK_CONTINUE
== retval
) {
239 const struct libent
*le
= libent_lookup_byuuid(uuid
);
240 assert(le
->le_mhaddr
== mhaddr
);
241 bool shouldskip
= false;
/* If the library's valid range is known, skip non-overlapping regions. */
242 if (V_SIZE(&le
->le_vr
))
243 shouldskip
= (R_ENDADDR(r
) < V_ADDR(&le
->le_vr
) ||
244 R_ADDR(r
) > V_ENDADDR(&le
->le_vr
));
246 retval
= add_subregions_for_libent(&srlhead
, r
, le
->le_mh
, le
->le_mhaddr
, le
);
249 if (WALK_CONTINUE
!= retval
)
253 * Take the unsorted list of subregions, if any,
254 * and hang a sorted array of ranges on the region structure.
256 if (!STAILQ_EMPTY(&srlhead
)) {
257 struct subregionlist
*srl
;
/* First pass (body elided): count the list entries into r_nsubregions. */
258 STAILQ_FOREACH(srl
, &srlhead
, srl_linkage
) {
261 assert(r
->r_nsubregions
);
/* NOTE(review): calloc result used unchecked (assert-heavy tool style). */
263 r
->r_subregions
= calloc(r
->r_nsubregions
, sizeof (void *));
/* Second pass: flatten the list into the array. */
265 STAILQ_FOREACH(srl
, &srlhead
, srl_linkage
) {
266 r
->r_subregions
[i
++] = srl
->srl_s
;
/* Sort by start address (block-based comparator). */
268 qsort_b(r
->r_subregions
, r
->r_nsubregions
, sizeof (void *),
269 ^(const void *a
, const void *b
) {
270 const struct subregion
*lhs
= *(struct subregion
**)a
;
271 const struct subregion
*rhs
= *(struct subregion
**)b
;
272 if (S_ADDR(lhs
) > S_ADDR(rhs
))
274 if (S_ADDR(lhs
) < S_ADDR(rhs
))
279 eliminate_duplicate_subregions(r
);
/* Memory backed by an external (file) pager may become filerefs. */
281 if (r
->r_info
.external_pager
) {
283 * Only very specific segment types get to be filerefs
285 for (i
= 0; i
< r
->r_nsubregions
; i
++) {
286 struct subregion
*s
= r
->r_subregions
[i
];
288 * Anything marked writable is trivially disqualified; we're
289 * going to copy it anyway.
291 if (s
->s_segcmd
.initprot
& VM_PROT_WRITE
)
294 /* __TEXT and __LINKEDIT are our real targets */
295 if (!issubregiontype(s
, SEG_TEXT
) && !issubregiontype(s
, SEG_LINKEDIT
) && !issubregiontype(s
, "__UNICODE")) {
296 if (OPTIONS_DEBUG(opt
, 3)) {
298 printvr(S_RANGE(s
), "skipping read-only %s segment %s\n", S_MACHO_TYPE(s
), str_hsize(hstr
, S_SIZE(s
)));
302 if (r
->r_insharedregion
) {
304 * Part of the shared region: things get more complicated.
308 * There's a file reference here for the whole region.
309 * For __TEXT subregions, we could, in principle (though
310 * see below) generate references to the individual
311 * dylibs that dyld reports in the region. If the
312 * debugger could then use the __LINKEDIT info in the
313 * file, then we'd be done. But as long as the dump
314 * includes __LINKEDIT sections, we're going to
315 * end up generating a file reference to the combined
316 * __LINKEDIT section in the shared cache anyway, so
317 * we might as well do that for the __TEXT regions as
/* Point the subregion at the region-wide fileref's library entry. */
320 s
->s_libent
= r
->r_fileref
->fr_libent
;
321 s
->s_isuuidref
= true;
324 * If we get here, it's likely that the shared cache
325 * name can't be found e.g. update_dyld_shared_cache(1).
326 * For __TEXT subregions, we could generate refs to
327 * the individual dylibs, but note that the mach header
328 * and segment commands in memory are still pointing
329 * into the shared cache so any act of reconstruction
330 * is fiendishly complex. So copy it.
332 assert(!s
->s_isuuidref
);
335 /* Just a regular dylib? */
337 s
->s_isuuidref
= true;
342 assert(WALK_CONTINUE
== retval
);
/* Free the temporary accumulation list (nodes only; subregions now owned
 * by the region). SAFE variant because nodes are freed while iterating. */
345 if (!STAILQ_EMPTY(&srlhead
)) {
346 struct subregionlist
*srl
, *trl
;
347 STAILQ_FOREACH_SAFE(srl
, &srlhead
, srl_linkage
, trl
) {
355 * Strip region of all decoration
357 * Invoked (on every region!) after an error during the initial
358 * 'decoration' phase to discard potentially incomplete information.
/*
 * Region-walk callback: drop any subregions attached to `r`. Must not be
 * called on a region already converted to sparse_ops.
 */
361 undecorate_memory_region(struct region
*r
, __unused
void *arg
)
363 assert(&sparse_ops
!= r
->r_op
);
/* clean_subregions() supplies the walk status when there is work to do. */
364 return r
->r_nsubregions
? clean_subregions(r
) : WALK_CONTINUE
;
368 * This optimization occurs -after- the vanilla_region_optimizations(),
369 * and -after- we've tagged zfod and first-pass fileref's.
/*
 * Region-walk callback: decide the final representation of region `r`.
 * Keeps zfod and whole-region fileref representations as-is, deletes clean
 * unreferenced shared-region segments, merges mergeable subregions, reverts
 * a single exactly-covering subregion back to a plain region, and otherwise
 * switches the region to sparse_ops.
 *
 * NOTE(review): extraction artifact — loop-index init, several
 * continue/brace lines, and the fileref-region condition header are elided;
 * code tokens kept byte-identical to the fragment.
 */
372 sparse_region_optimization(struct region
*r
, __unused
void *arg
)
374 assert(&sparse_ops
!= r
->r_op
);
376 if (r
->r_inzfodregion
) {
378 * Pure zfod region: almost certainly a more compact
379 * representation - keep it that way.
381 if (OPTIONS_DEBUG(opt
, 3))
382 printr(r
, "retaining zfod region\n");
383 assert(&zfod_ops
== r
->r_op
);
384 return clean_subregions(r
);
/* Shared-region segment with no dyld-claimed subregions. */
387 if (r
->r_insharedregion
&& 0 == r
->r_nsubregions
) {
389 * A segment in the shared region needs to be
390 * identified with an LC_SEGMENT that dyld claims,
391 * otherwise (we assert) it's not useful to the dump.
393 if (OPTIONS_DEBUG(opt
, 2)) {
395 printr(r
, "not referenced in dyld info => "
396 "eliding %s range in shared region\n",
397 str_hsize(hstr
, R_SIZE(r
)));
/* Clean (no dirty or swapped pages) => safe to drop from the dump. */
399 if (0 == r
->r_info
.pages_dirtied
&& 0 == r
->r_info
.pages_swapped_out
)
400 return WALK_DELETE_REGION
;
401 if (OPTIONS_DEBUG(opt
, 2)) {
403 printr(r
, "dirty pages, but not referenced in dyld info => "
404 "NOT eliding %s range in shared region\n",
405 str_hsize(hstr
, R_SIZE(r
)));
411 * Already have a fileref for the whole region: already
412 * a more compact representation - keep it that way.
414 if (OPTIONS_DEBUG(opt
, 3))
415 printr(r
, "retaining fileref region\n");
416 assert(&fileref_ops
== r
->r_op
);
417 return clean_subregions(r
);
418 if (r
->r_nsubregions
> 1) {
422 * Merge adjacent or identical subregions that have no file reference
423 * (Reducing the number of subregions reduces header overhead and
424 * improves compressability)
/* Pairwise scan: s0 = [i-1], s1 = [i]; a merge elides s1 and re-tests i. */
427 while (i
< r
->r_nsubregions
) {
428 struct subregion
*s0
= r
->r_subregions
[i
-1];
429 struct subregion
*s1
= r
->r_subregions
[i
];
431 if (s0
->s_isuuidref
) {
433 continue; /* => destined to be a fileref */
435 if (!issamesubregiontype(s0
, s1
)) {
437 continue; /* merge-able subregions must have same "type" */
440 if (S_ENDADDR(s0
) == S_ADDR(s1
)) {
441 /* directly adjacent subregions */
442 if (OPTIONS_DEBUG(opt
, 2))
443 printr(r
, "merging subregions (%llx-%llx + %llx-%llx) -- adjacent\n",
444 S_ADDR(s0
), S_ENDADDR(s0
), S_ADDR(s1
), S_ENDADDR(s1
));
/* Grow s0 to cover both, then drop s1. */
445 S_SETSIZE(s0
, S_ENDADDR(s1
) - S_ADDR(s0
));
446 elide_subregion(r
, i
);
/* Host-page-frame numbers of each subregion's first ... */
450 const mach_vm_size_t pfn
[2] = {
451 S_ADDR(s0
) >> pageshift_host
,
452 S_ADDR(s1
) >> pageshift_host
/* ... and last byte (inclusive, hence the -1). */
454 const mach_vm_size_t endpfn
[2] = {
455 (S_ENDADDR(s0
) - 1) >> pageshift_host
,
456 (S_ENDADDR(s1
) - 1) >> pageshift_host
459 if (pfn
[0] == pfn
[1] && pfn
[0] == endpfn
[0] && pfn
[0] == endpfn
[1]) {
460 /* two small subregions share a host page */
461 if (OPTIONS_DEBUG(opt
, 2))
462 printr(r
, "merging subregions (%llx-%llx + %llx-%llx) -- same page\n",
463 S_ADDR(s0
), S_ENDADDR(s0
), S_ADDR(s1
), S_ENDADDR(s1
));
464 S_SETSIZE(s0
, S_ENDADDR(s1
) - S_ADDR(s0
));
465 elide_subregion(r
, i
);
469 if (pfn
[1] == 1 + endpfn
[0]) {
470 /* subregions are pagewise-adjacent: bigger chunks to compress */
471 if (OPTIONS_DEBUG(opt
, 2))
472 printr(r
, "merging subregions (%llx-%llx + %llx-%llx) -- adjacent pages\n",
473 S_ADDR(s0
), S_ENDADDR(s0
), S_ADDR(s1
), S_ENDADDR(s1
));
474 S_SETSIZE(s0
, S_ENDADDR(s1
) - S_ADDR(s0
));
475 elide_subregion(r
, i
);
479 i
++; /* this isn't the subregion we're looking for */
/* A single non-fileref subregion that exactly covers the region adds no
 * information: revert to the plain region representation. */
483 if (1 == r
->r_nsubregions
) {
484 struct subregion
*s
= r
->r_subregions
[0];
485 if (!s
->s_isuuidref
&&
486 R_ADDR(r
) == S_ADDR(s
) && R_ENDADDR(r
) == S_ENDADDR(s
)) {
487 if (OPTIONS_DEBUG(opt
, 3))
488 printr(r
, "subregion (%llx-%llx) reverts to region\n",
489 S_ADDR(s
), S_ENDADDR(s
));
490 return clean_subregions(r
);
/* Subregions survived: this region is now written via the sparse ops. */
494 if (r
->r_nsubregions
)
495 r
->r_op
= &sparse_ops
;
497 return WALK_CONTINUE
;