/*
 * Copyright (c) 2016-2018 Apple Inc. All rights reserved.
 */

typedef char *kobject_description_t[512];

#include <compression.h>
#include <sys/param.h>
native_mach_header_t *
make_corefile_mach_header(void *data)
{
    native_mach_header_t *mh = data;
    mh->magic = NATIVE_MH_MAGIC;
    mh->filetype = MH_CORE;
#if defined(__LP64__)
    const int is64 = 1;
#else
    const int is64 = 0;
#endif
#if defined(__i386__) || defined(__x86_64__)
    mh->cputype = is64 ? CPU_TYPE_X86_64 : CPU_TYPE_I386;
    mh->cpusubtype = is64 ? CPU_SUBTYPE_X86_64_ALL : CPU_SUBTYPE_I386_ALL;
#elif defined(__arm__) || defined(__arm64__)
    mh->cputype = is64 ? CPU_TYPE_ARM64 : CPU_TYPE_ARM;
    mh->cpusubtype = is64 ? CPU_SUBTYPE_ARM64_ALL : CPU_SUBTYPE_ARM_ALL;
#endif
    return mh;
}
struct proto_coreinfo_command *
make_coreinfo_command(native_mach_header_t *mh, void *data, const uuid_t aoutid, uint64_t address, uint64_t dyninfo)
{
    struct proto_coreinfo_command *cc = data;
    cc->cmd = proto_LC_COREINFO;
    cc->cmdsize = sizeof (*cc);
    cc->type = proto_CORETYPE_USER;
    cc->pageshift = (uint16_t)pageshift_host;
    cc->address = address;
    uuid_copy(cc->uuid, aoutid);
    cc->dyninfo = dyninfo;
    mach_header_inc_ncmds(mh, 1);
    mach_header_inc_sizeofcmds(mh, cc->cmdsize);
    return cc;
}
native_segment_command_t *
make_native_segment_command(void *data, const struct vm_range *vr, const struct file_range *fr, vm_prot_t maxprot, vm_prot_t initprot)
{
    native_segment_command_t *sc = data;
    sc->cmd = NATIVE_LC_SEGMENT;
    sc->cmdsize = sizeof (*sc);
    sc->vmaddr = (unsigned long)V_ADDR(vr);
    sc->vmsize = (unsigned long)V_SIZE(vr);
    sc->fileoff = (unsigned long)F_OFF(fr);
    sc->filesize = (unsigned long)F_SIZE(fr);
    sc->maxprot = maxprot;
    sc->initprot = initprot;
    return sc;
}
static struct proto_coredata_command *
make_coredata_command(void *data, const struct vm_range *vr, const struct file_range *fr, const vm_region_submap_info_data_64_t *info, unsigned comptype, unsigned purgable)
{
    struct proto_coredata_command *cc = data;
    cc->cmd = proto_LC_COREDATA;
    cc->cmdsize = sizeof (*cc);
    cc->vmaddr = V_ADDR(vr);
    cc->vmsize = V_SIZE(vr);
    cc->fileoff = F_OFF(fr);
    cc->filesize = F_SIZE(fr);
    cc->maxprot = info->max_protection;
    cc->prot = info->protection;
    cc->flags = COMP_MAKE_FLAGS(comptype);
    cc->share_mode = info->share_mode;
    assert(purgable <= UINT8_MAX);
    cc->purgable = (uint8_t)purgable;
    assert(info->user_tag <= UINT8_MAX);
    cc->tag = (uint8_t)info->user_tag;
    cc->extp = info->external_pager;
    return cc;
}
static size_t
sizeof_segment_command(void) {
    return opt->extended ?
        sizeof (struct proto_coredata_command) : sizeof (native_segment_command_t);
}
static struct load_command *
make_segment_command(void *data, const struct vm_range *vr, const struct file_range *fr, const vm_region_submap_info_data_64_t *info, unsigned comptype, int purgable)
{
    if (opt->extended)
        make_coredata_command(data, vr, fr, info, comptype, purgable);
    else
        make_native_segment_command(data, vr, fr, info->max_protection, info->protection);
    return data;
}
/*
 * Increment the mach-o header data when we succeed
 */
static void
commit_load_command(struct write_segment_data *wsd, const struct load_command *lc)
{
    wsd->wsd_lc = (caddr_t)lc + lc->cmdsize;
    native_mach_header_t *mh = wsd->wsd_mh;
    mach_header_inc_ncmds(mh, 1);
    mach_header_inc_sizeofcmds(mh, lc->cmdsize);
}
#pragma mark -- Regions written as "file references" --
static size_t
cmdsize_fileref_command(const char *nm)
{
    size_t cmdsize = sizeof (struct proto_fileref_command);
    size_t len;

    if (0 != (len = strlen(nm))) {
        len++; // NUL-terminated for mmap sanity
        cmdsize += roundup(len, sizeof (long));
    }
    return cmdsize;
}
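/*
 * Worked example (illustrative, not in the original source): the padding
 * keeps the next load command long-aligned.
 *
 *   strlen("/usr/lib/dyld") == 13          // pathname length
 *   13 + 1 == 14                           // plus the NUL terminator
 *   roundup(14, sizeof (long)) == 16       // padded to an 8-byte boundary on LP64
 *   cmdsize == sizeof (struct proto_fileref_command) + 16
 */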
static void
size_fileref_subregion(const struct subregion *s, struct size_core *sc)
{
    size_t cmdsize = cmdsize_fileref_command(S_PATHNAME(s));
    sc->headersize += cmdsize;
    sc->count++;
    sc->memsize += S_SIZE(s);
}
static void
size_fileref_region(const struct region *r, struct size_core *sc)
{
    assert(0 == r->r_nsubregions);
    assert(!r->r_inzfodregion);

    size_t cmdsize = cmdsize_fileref_command(r->r_fileref->fr_pathname);
    sc->headersize += cmdsize;
    sc->count++;
    sc->memsize += R_SIZE(r);
}
static struct proto_fileref_command *
make_fileref_command(void *data, const char *pathname, const uuid_t uuid,
    const struct vm_range *vr, const struct file_range *fr,
    const vm_region_submap_info_data_64_t *info, unsigned purgable)
{
    struct proto_fileref_command *fc = data;
    size_t len;

    fc->cmd = proto_LC_FILEREF;
    fc->cmdsize = sizeof (*fc);
    if (0 != (len = strlen(pathname))) {
        /*
         * Strings live immediately after the
         * command, and are included in the cmdsize
         */
        fc->filename.offset = sizeof (*fc);
        char *s = (char *)(fc + 1);
        strlcpy(s, pathname, ++len); // NUL-terminated for mmap sanity
        fc->cmdsize += roundup(len, sizeof (long));
        assert(cmdsize_fileref_command(pathname) == fc->cmdsize);
    }

    /*
     * A file reference allows different kinds of identifiers for
     * the reference to be reconstructed.
     */
    assert(info->external_pager);

    if (!uuid_is_null(uuid)) {
        uuid_copy(fc->id, uuid);
        fc->flags = FREF_MAKE_FLAGS(kFREF_ID_UUID);
    } else {
        struct stat st;
        if (-1 != stat(pathname, &st) && 0 != st.st_mtimespec.tv_sec) {
            /* "little-endian format timespec structure" */
            struct timespec ts = st.st_mtimespec;
            ts.tv_nsec = 0; // allow touch(1) to fix things
            memset(fc->id, 0, sizeof(fc->id));
            memcpy(fc->id, &ts, sizeof(ts));
            fc->flags = FREF_MAKE_FLAGS(kFREF_ID_MTIMESPEC_LE);
        } else
            fc->flags = FREF_MAKE_FLAGS(kFREF_ID_NONE);
    }

    fc->vmaddr = V_ADDR(vr);
    fc->vmsize = V_SIZE(vr);

    assert(F_OFF(fr) >= 0);
    fc->fileoff = F_OFF(fr);
    fc->filesize = F_SIZE(fr);

    assert(info->max_protection & VM_PROT_READ);
    fc->maxprot = info->max_protection;
    fc->prot = info->protection;

    fc->share_mode = info->share_mode;
    assert(purgable <= UINT8_MAX);
    fc->purgable = (uint8_t)purgable;
    assert(info->user_tag <= UINT8_MAX);
    fc->tag = (uint8_t)info->user_tag;
    fc->extp = info->external_pager;
    return fc;
}
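/*
 * Reader-side sketch (illustrative only, not part of this file): a consumer
 * of the core file could validate a kFREF_ID_MTIMESPEC_LE reference by
 * re-stat(2)ing the recorded pathname and comparing whole-second mtimes
 * against the timespec stored in fc->id, mirroring the encoding above.
 * "fileref_mtime_matches" is a hypothetical helper name.
 */
static bool
fileref_mtime_matches(const struct proto_fileref_command *fc, const char *pathname)
{
    struct stat st;
    if (-1 == stat(pathname, &st))
        return false;                   /* file is gone: reference unusable */
    struct timespec ts = st.st_mtimespec;
    ts.tv_nsec = 0;                     /* written above with tv_nsec forced to zero */
    struct timespec stored;
    memcpy(&stored, fc->id, sizeof(stored));
    return stored.tv_sec == ts.tv_sec;  /* unchanged since the core was taken */
}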
/*
 * It's almost always more efficient to write out a reference to the
 * data than write out the data itself.
 */
static walk_return_t
write_fileref_subregion(const struct region *r, const struct subregion *s, struct write_segment_data *wsd)
{
    if (OPTIONS_DEBUG(opt, 1) && !issubregiontype(s, SEG_TEXT) && !issubregiontype(s, SEG_LINKEDIT))
        printf("%s: unusual segment type %s from %s\n", __func__, S_MACHO_TYPE(s), S_FILENAME(s));
    assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
    assert((r->r_info.protection & VM_PROT_WRITE) == 0);

    const struct libent *le = S_LIBENT(s);
    const struct file_range fr = {
        .off = S_MACHO_FILEOFF(s),
        .size = S_SIZE(s),
    };
    const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, le->le_pathname, le->le_uuid, S_RANGE(s), &fr, &r->r_info, r->r_purgable);

    commit_load_command(wsd, (const void *)fc);
    if (OPTIONS_DEBUG(opt, 3)) {
        hsize_str_t hstr;
        printr(r, "ref '%s' %s (vm %llx-%llx, file offset %lld for %s)\n", S_FILENAME(s), S_MACHO_TYPE(s), (uint64_t)fc->vmaddr, (uint64_t)fc->vmaddr + fc->vmsize, (int64_t)fc->fileoff, str_hsize(hstr, fc->filesize));
    }
    return WALK_CONTINUE;
}
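/*
 * Reader-side sketch (illustrative, not part of this file): given a
 * proto_fileref_command pulled out of the core, the referenced bytes can be
 * recovered from the original file rather than from the core itself.
 * "read_fileref_bytes" is a hypothetical helper; it assumes the pathname is
 * stored NUL-terminated at fc->filename.offset, as written above.
 */
static ssize_t
read_fileref_bytes(const struct proto_fileref_command *fc, void *buf, size_t buflen)
{
    const char *path = (const char *)fc + fc->filename.offset;
    const int fd = open(path, O_RDONLY);
    if (-1 == fd)
        return -1;
    const size_t want = fc->filesize < buflen ? (size_t)fc->filesize : buflen;
    /* bytes that back [vmaddr, vmaddr + filesize) in the dumped task */
    const ssize_t got = pread(fd, buf, want, (off_t)fc->fileoff);
    close(fd);
    return got;
}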
/*
 * Note that we may be asked to write reference segments whose protections
 * are rw- -- this -should- be ok as we don't convert the region to a file
 * reference unless we know it hasn't been modified.
 */
static walk_return_t
write_fileref_region(const struct region *r, struct write_segment_data *wsd)
{
    assert(0 == r->r_nsubregions);
    assert(r->r_info.user_tag != VM_MEMORY_IOKIT);
    assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
    assert(!r->r_inzfodregion);

    const struct libent *le = r->r_fileref->fr_libent;
    const char *pathname = r->r_fileref->fr_pathname;
    const struct file_range fr = {
        .off = r->r_fileref->fr_offset,
        .size = R_SIZE(r),
    };
    const struct proto_fileref_command *fc = make_fileref_command(wsd->wsd_lc, pathname, le ? le->le_uuid : UUID_NULL, R_RANGE(r), &fr, &r->r_info, r->r_purgable);

    commit_load_command(wsd, (const void *)fc);
    if (OPTIONS_DEBUG(opt, 3)) {
        hsize_str_t hstr;
        printr(r, "ref '%s' %s (vm %llx-%llx, file offset %lld for %s)\n", pathname, "(type?)", (uint64_t)fc->vmaddr, (uint64_t)fc->vmaddr + fc->vmsize, (int64_t)fc->fileoff, str_hsize(hstr, fc->filesize));
    }
    return WALK_CONTINUE;
}
const struct regionop fileref_ops = {
    write_fileref_region,
};
#pragma mark -- ZFOD segments written only to the header --
static void
size_zfod_region(const struct region *r, struct size_core *sc)
{
    assert(0 == r->r_nsubregions);
    assert(r->r_inzfodregion);
    sc->headersize += sizeof_segment_command();
    sc->count++;
    sc->memsize += R_SIZE(r);
}
static walk_return_t
write_zfod_region(const struct region *r, struct write_segment_data *wsd)
{
    assert(r->r_info.user_tag != VM_MEMORY_IOKIT);
    assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);

    const struct file_range fr = {
        .off = wsd->wsd_foffset,
        .size = 0,
    };
    make_segment_command(wsd->wsd_lc, R_RANGE(r), &fr, &r->r_info, 0, VM_PURGABLE_EMPTY);
    commit_load_command(wsd, wsd->wsd_lc);
    return WALK_CONTINUE;
}
const struct regionop zfod_ops = {
    write_zfod_region,
};
#pragma mark -- Regions containing data --
static walk_return_t
pwrite_memory(struct write_segment_data *wsd, const void *addr, size_t size, const struct vm_range *vr)
{
    ssize_t nwritten = 0;

    const int error = bounded_pwrite(wsd->wsd_fd, addr, size, wsd->wsd_foffset, &wsd->wsd_nocache, &nwritten);

    if (error || OPTIONS_DEBUG(opt, 3)) {
        hsize_str_t hsz;
        printvr(vr, "writing %ld bytes at offset %lld -> ", size, wsd->wsd_foffset);
        if (error)
            printf("err #%d - %s ", error, strerror(error));
        else {
            printf("%s ", str_hsize(hsz, nwritten));
            if (size != (size_t)nwritten)
                printf("[%zd - incomplete write!] ", nwritten);
            else if (size != V_SIZE(vr))
                printf("(%s in memory) ",
                    str_hsize(hsz, V_SIZE(vr)));
        }
        printf("\n");
    }

    walk_return_t step = WALK_CONTINUE;
    switch (error) {
        case 0:
            if (size != (size_t)nwritten)
                step = WALK_ERROR;
            else {
                wsd->wsd_foffset += nwritten;
                wsd->wsd_nwritten += nwritten;
            }
            break;
        case EFAULT:    // transient mapping failure?
            break;
        default:        // EROFS, ENOSPC, EFBIG etc. */
            step = WALK_ERROR;
            break;
    }
    return step;
}
/*
 * Write a contiguous range of memory into the core file.
 * Apply compression, and chunk if necessary.
 */
static int
segment_compflags(compression_algorithm ca, unsigned *algnum)
{
    switch (ca) {
        case COMPRESSION_LZ4:
            *algnum = kCOMP_LZ4;
            break;
        case COMPRESSION_ZLIB:
            *algnum = kCOMP_ZLIB;
            break;
        case COMPRESSION_LZMA:
            *algnum = kCOMP_LZMA;
            break;
        case COMPRESSION_LZFSE:
            *algnum = kCOMP_LZFSE;
            break;
        default:
            err(EX_SOFTWARE, "unsupported compression algorithm %x", ca);
    }
    return 0;
}
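/*
 * Decode-side sketch (illustrative, not part of this file): the kCOMP_* flag
 * recorded by segment_compflags() maps straight back to a libcompression
 * algorithm, so a reader could expand a compressed segment with
 * compression_decode_buffer(), whose return value is the decoded size
 * (the segment's vmsize on success).  "decode_core_segment" is a
 * hypothetical helper name.
 */
static size_t
decode_core_segment(unsigned algnum, void *dst, size_t dstsize,
    const void *src, size_t srcsize)
{
    compression_algorithm ca;
    switch (algnum) {
        case kCOMP_LZ4:   ca = COMPRESSION_LZ4;   break;
        case kCOMP_ZLIB:  ca = COMPRESSION_ZLIB;  break;
        case kCOMP_LZMA:  ca = COMPRESSION_LZMA;  break;
        case kCOMP_LZFSE: ca = COMPRESSION_LZFSE; break;
        default:          return 0;    /* unknown or uncompressed */
    }
    return compression_decode_buffer(dst, dstsize, src, srcsize, NULL, ca);
}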
static bool
is_file_mapped_shared(const struct region *r)
{
    if (r->r_info.external_pager)
        switch (r->r_info.share_mode) {
            case SM_TRUESHARED:     // sm=shm
            case SM_SHARED:         // sm=ali
            case SM_SHARED_ALIASED: // sm=s/a
                return true;
            default:
                break;
        }
    return false;
}
static walk_return_t
map_memory_range(struct write_segment_data *wsd, const struct region *r, const struct vm_range *vr, struct vm_range *dp)
{
    if (r->r_incommregion) {
        /*
         * Special case: for commpage access, copy from our own address space.
         */
        V_SETADDR(dp, 0);
        V_SETSIZE(dp, V_SIZE(vr));

        kern_return_t kr = mach_vm_allocate(mach_task_self(), &dp->addr, dp->size, VM_FLAGS_ANYWHERE);
        if (KERN_SUCCESS != kr || 0 == dp->addr) {
            err_mach(kr, r, "mach_vm_allocate c %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
            print_one_memory_region(r);
            return WALK_ERROR;
        }
        if (OPTIONS_DEBUG(opt, 3))
            printr(r, "copying from self %llx-%llx\n", V_ADDR(vr), V_ENDADDR(vr));
        memcpy((void *)dp->addr, (const void *)V_ADDR(vr), V_SIZE(vr));
        return WALK_CONTINUE;
    }

    if (!r->r_insharedregion && 0 == (r->r_info.protection & VM_PROT_READ)) {
        assert(0 != (r->r_info.max_protection & VM_PROT_READ)); // simple_region_optimization()

        /*
         * Special case: region that doesn't currently have read permission.
         * (e.g. --x/r-x permissions with tag 64 - JS JIT generated code
         * from com.apple.WebKit.WebContent)
         */
        const mach_vm_offset_t pagesize_host = 1u << pageshift_host;
        if (OPTIONS_DEBUG(opt, 3))
            printr(r, "unreadable (%s/%s), remap with read permission\n",
                str_prot(r->r_info.protection), str_prot(r->r_info.max_protection));

        V_SETSIZE(dp, V_SIZE(vr));
        vm_prot_t cprot, mprot;
        kern_return_t kr = mach_vm_remap(mach_task_self(), &dp->addr, V_SIZE(dp), pagesize_host - 1, true, wsd->wsd_task, V_ADDR(vr), true, &cprot, &mprot, VM_INHERIT_NONE);
        if (KERN_SUCCESS != kr) {
            err_mach(kr, r, "mach_vm_remap() %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
            return WALK_ERROR;
        }
        assert(r->r_info.protection == cprot && r->r_info.max_protection == mprot);
        kr = mach_vm_protect(mach_task_self(), V_ADDR(dp), V_SIZE(dp), false, VM_PROT_READ);
        if (KERN_SUCCESS != kr) {
            err_mach(kr, r, "mach_vm_protect() %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
            mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
            return WALK_ERROR;
        }
        return WALK_CONTINUE;
    }

    /*
     * Most segments with data are read here
     */
    vm_offset_t data32 = 0;
    mach_msg_type_number_t data32_count;
    kern_return_t kr = mach_vm_read(wsd->wsd_task, V_ADDR(vr), V_SIZE(vr), &data32, &data32_count);
    switch (kr) {
        case KERN_SUCCESS:
            V_SETADDR(dp, data32);
            V_SETSIZE(dp, data32_count);
            break;
        case KERN_INVALID_ADDRESS:
            if (!r->r_insharedregion &&
                (VM_MEMORY_SKYWALK == r->r_info.user_tag || is_file_mapped_shared(r))) {
                if (OPTIONS_DEBUG(opt, 1)) {
                    /* not necessarily an error: mitigation below */
                    tag_str_t tstr;
                    printr(r, "mach_vm_read() failed (%s) -- substituting zeroed region\n", str_tagr(tstr, r));
                    if (OPTIONS_DEBUG(opt, 2))
                        print_one_memory_region(r);
                }
                V_SETSIZE(dp, V_SIZE(vr));
                kr = mach_vm_allocate(mach_task_self(), &dp->addr, V_SIZE(dp), VM_FLAGS_ANYWHERE);
                if (KERN_SUCCESS != kr || 0 == V_ADDR(dp))
                    err_mach(kr, r, "mach_vm_allocate() z %llx-%llx", V_ADDR(vr), V_ENDADDR(vr));
                break;
            }
            /* FALLTHROUGH */
        default:
            err_mach(kr, r, "mach_vm_read() %llx-%llx", V_ADDR(vr), V_SIZE(vr));
            if (OPTIONS_DEBUG(opt, 1))
                print_one_memory_region(r);
            break;
    }
    if (kr != KERN_SUCCESS)
        return WALK_ERROR;

    /*
     * Sometimes (e.g. searchd) we may not be able to fetch all the pages
     * from the underlying mapped file, in which case replace those pages
     * with zfod pages (at least they compress efficiently) rather than
     * taking a SIGBUS when compressing them.
     *
     * XXX Perhaps we should just catch the SIGBUS, and if the faulting address
     * is in the right range, substitute zfod pages and rerun region compression?
     * Complex though, because the compression code may be multithreaded.
     */
    if (!r->r_insharedregion && is_file_mapped_shared(r)) {
        const mach_vm_offset_t pagesize_host = 1u << pageshift_host;

        if (r->r_info.pages_resident * pagesize_host == V_SIZE(dp))
            return WALK_CONTINUE; // all pages resident, so skip ..

        if (OPTIONS_DEBUG(opt, 2))
            printr(r, "probing %llu pages in mapped-shared file\n", V_SIZE(dp) / pagesize_host);

        for (mach_vm_offset_t a = V_ADDR(dp); a < V_ENDADDR(dp); a += pagesize_host) {

            mach_msg_type_number_t pCount = VM_PAGE_INFO_BASIC_COUNT;
            vm_page_info_basic_data_t pInfo;

            kr = mach_vm_page_info(mach_task_self(), a, VM_PAGE_INFO_BASIC, (vm_page_info_t)&pInfo, &pCount);
            if (KERN_SUCCESS != kr) {
                err_mach(kr, NULL, "mach_vm_page_info() at %llx", a);
                break;
            }
            /* If the VM has the page somewhere, assume we can bring it back */
            if (pInfo.disposition & (VM_PAGE_QUERY_PAGE_PRESENT | VM_PAGE_QUERY_PAGE_REF | VM_PAGE_QUERY_PAGE_DIRTY))
                continue;

            /* Force the page to be fetched to see if it faults */
            mach_vm_size_t tsize = pagesize_host;
            void *tmp = valloc((size_t)tsize);
            const mach_vm_address_t vtmp = (mach_vm_address_t)tmp;

            switch (kr = mach_vm_read_overwrite(mach_task_self(), a, tsize, vtmp, &tsize)) {
                case KERN_SUCCESS:
                    break;
                case KERN_INVALID_ADDRESS: {
                    /* Content can't be found: replace it and the rest of the region with zero-fill pages */
                    if (OPTIONS_DEBUG(opt, 2)) {
                        printr(r, "mach_vm_read_overwrite() failed after %llu pages -- substituting zfod\n", (a - V_ADDR(dp)) / pagesize_host);
                        print_one_memory_region(r);
                    }
                    mach_vm_address_t va = a;
                    kr = mach_vm_allocate(mach_task_self(), &va, V_ENDADDR(dp) - va, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
                    if (KERN_SUCCESS != kr) {
                        err_mach(kr, r, "mach_vm_allocate() %llx", a);
                        break;
                    }
                    a = V_ENDADDR(dp); // no need to look any further
                    break;
                }
                default:
                    err_mach(kr, r, "mach_vm_overwrite() %llx", a);
                    break;
            }
            free(tmp);
            if (KERN_SUCCESS != kr)
                break;
        }
        if (KERN_SUCCESS != kr) {
            kr = mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
            if (KERN_SUCCESS != kr && OPTIONS_DEBUG(opt, 1))
                err_mach(kr, r, "mach_vm_deallocate() pre %llx-%llx", V_ADDR(dp), V_ENDADDR(dp));
            return WALK_ERROR;
        }
    }

    return WALK_CONTINUE;
}
static walk_return_t
write_memory_range(struct write_segment_data *wsd, const struct region *r, mach_vm_offset_t vmaddr, mach_vm_offset_t vmsize)
{
    assert(R_ADDR(r) <= vmaddr && R_ENDADDR(r) >= vmaddr + vmsize);

    mach_vm_offset_t resid = vmsize;
    walk_return_t step = WALK_CONTINUE;

    do {
        /*
         * Since some regions can be inconveniently large,
         * chop them into multiple chunks as we compress them.
         * (mach_vm_read has 32-bit limitations too).
         */
        vmsize = vmsize > INT32_MAX ? INT32_MAX : vmsize;
        if (opt->chunksize > 0 && vmsize > opt->chunksize)
            vmsize = opt->chunksize;
        assert(vmsize <= INT32_MAX);

        const struct vm_range vr = {
            .addr = vmaddr,
            .size = vmsize,
        };
        struct vm_range d, *dp = &d;

        step = map_memory_range(wsd, r, &vr, dp);
        if (WALK_CONTINUE != step)
            break;
        assert(0 != V_ADDR(dp) && 0 != V_SIZE(dp));
        const void *srcaddr = (const void *)V_ADDR(dp);

        mach_vm_behavior_set(mach_task_self(), V_ADDR(dp), V_SIZE(dp), VM_BEHAVIOR_SEQUENTIAL);

        size_t filesize;
        unsigned algorithm = 0;
        void *dstbuf = NULL;

        if (opt->extended) {
            dstbuf = malloc(V_SIZEOF(dp));
            if (dstbuf) {
                filesize = compression_encode_buffer(dstbuf, V_SIZEOF(dp), srcaddr, V_SIZEOF(dp), NULL, opt->calgorithm);
                if (filesize > 0 && filesize < V_SIZEOF(dp)) {
                    srcaddr = dstbuf; /* the data source is now heap, compressed */
                    mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
                    V_SETADDR(dp, 0);
                    if (segment_compflags(opt->calgorithm, &algorithm) != 0) {
                        free(dstbuf);
                        mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
                        V_SETADDR(dp, 0);
                        step = WALK_ERROR;
                        break;
                    }
                } else {
                    free(dstbuf);
                    dstbuf = NULL;
                    filesize = V_SIZEOF(dp);
                }
            } else
                filesize = V_SIZEOF(dp);
            assert(filesize <= V_SIZEOF(dp));
        } else
            filesize = V_SIZEOF(dp);

        const struct file_range fr = {
            .off = wsd->wsd_foffset,
            .size = filesize,
        };
        make_segment_command(wsd->wsd_lc, &vr, &fr, &r->r_info, algorithm, r->r_purgable);
        step = pwrite_memory(wsd, srcaddr, filesize, &vr);
        if (dstbuf)
            free(dstbuf);
        if (0 != V_ADDR(dp)) {
            kern_return_t kr = mach_vm_deallocate(mach_task_self(), V_ADDR(dp), V_SIZE(dp));
            if (KERN_SUCCESS != kr && OPTIONS_DEBUG(opt, 1))
                err_mach(kr, r, "mach_vm_deallocate() post %llx-%llx", V_ADDR(dp), V_SIZE(dp));
        }

        if (WALK_ERROR == step)
            break;
        commit_load_command(wsd, wsd->wsd_lc);
        vmaddr += vmsize;
        vmsize = (resid -= vmsize);
    } while (resid != 0);

    return step;
}
/*
 * Sigh. This is a workaround.
 * Find the vmsize as if the VM system manages ranges in host pagesize units
 * rather than application pagesize units.
 */
static mach_vm_size_t
getvmsize_host(const task_t task, const struct region *r)
{
    mach_vm_size_t vmsize_host = R_SIZE(r);

    if (pageshift_host != pageshift_app) {
        is_actual_size(task, r, &vmsize_host);
        if (OPTIONS_DEBUG(opt, 1) && R_SIZE(r) != vmsize_host)
            printr(r, "(region size tweak: was %llx, is %llx)\n", R_SIZE(r), vmsize_host);
    }
    return vmsize_host;
}

static __inline mach_vm_size_t
getvmsize_host(__unused const task_t task, const struct region *r)
{
    return R_SIZE(r);
}
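/*
 * Illustrative note (not in the original source): when the target process
 * uses a different page size than the kernel (pageshift_app != pageshift_host,
 * e.g. 4 KiB application pages on a 16 KiB kernel), region sizes reported in
 * application-page units may not match the host-granular size, so
 * is_actual_size() is asked for the host view before the region is written.
 */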
static walk_return_t
write_sparse_region(const struct region *r, struct write_segment_data *wsd)
{
    assert(r->r_nsubregions);
    assert(!r->r_inzfodregion);
    assert(NULL == r->r_fileref);

    const mach_vm_size_t vmsize_host = getvmsize_host(wsd->wsd_task, r);
    walk_return_t step = WALK_CONTINUE;

    for (unsigned i = 0; i < r->r_nsubregions; i++) {
        const struct subregion *s = r->r_subregions[i];

        if (s->s_isuuidref)
            step = write_fileref_subregion(r, s, wsd);
        else {
            /* Write this one out as real data */
            mach_vm_size_t vmsize = S_SIZE(s);
            if (R_SIZE(r) != vmsize_host) {
                if (S_ADDR(s) + vmsize > R_ADDR(r) + vmsize_host) {
                    vmsize = R_ADDR(r) + vmsize_host - S_ADDR(s);
                    if (OPTIONS_DEBUG(opt, 3))
                        printr(r, "(subregion size tweak: was %llx, is %llx)\n",
                            S_SIZE(s), vmsize);
                }
            }
            step = write_memory_range(wsd, r, S_ADDR(s), vmsize);
        }
        if (WALK_ERROR == step)
            break;
    }
    return step;
}
static walk_return_t
write_vanilla_region(const struct region *r, struct write_segment_data *wsd)
{
    assert(0 == r->r_nsubregions);
    assert(!r->r_inzfodregion);
    assert(NULL == r->r_fileref);

    const mach_vm_size_t vmsize_host = getvmsize_host(wsd->wsd_task, r);
    return write_memory_range(wsd, r, R_ADDR(r), vmsize_host);
}
walk_return_t
region_write_memory(struct region *r, void *arg)
{
    assert(r->r_info.user_tag != VM_MEMORY_IOKIT); // elided in walk_regions()
    assert((r->r_info.max_protection & VM_PROT_READ) == VM_PROT_READ);
    return ROP_WRITE(r, arg);
}
/*
 * Handles the cases where segments are broken into chunks i.e. when
 * writing compressed segments.
 */
static unsigned long
count_memory_range(mach_vm_offset_t vmsize)
{
    unsigned long count;
    if (opt->chunksize) {
        count = (size_t)vmsize / opt->chunksize;
        if (vmsize != (mach_vm_offset_t)count * opt->chunksize)
            count++;
    } else
        count = 1;
    return count;
}
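/*
 * Worked example (illustrative, not in the original source): with
 * opt->chunksize == 16 MiB, a 40 MiB region yields 40 / 16 == 2 with a
 * remainder, so count becomes 3 and three segment commands are reserved;
 * with chunking disabled (opt->chunksize == 0) the count is always 1.
 */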
/*
 * A sparse region is likely a writable data segment described by
 * native_segment_command_t somewhere in the address space.
 */
static void
size_sparse_subregion(const struct subregion *s, struct size_core *sc)
{
    const unsigned long count = count_memory_range(S_SIZE(s));
    sc->headersize += sizeof_segment_command() * count;
    sc->count += count;
    sc->memsize += S_SIZE(s);
}
static void
size_sparse_region(const struct region *r, struct size_core *sc_sparse, struct size_core *sc_fileref)
{
    assert(0 != r->r_nsubregions);

    unsigned long entry_total = sc_sparse->count + sc_fileref->count;
    for (unsigned i = 0; i < r->r_nsubregions; i++) {
        const struct subregion *s = r->r_subregions[i];
        if (s->s_isuuidref)
            size_fileref_subregion(s, sc_fileref);
        else
            size_sparse_subregion(s, sc_sparse);
    }
    if (OPTIONS_DEBUG(opt, 3)) {
        /* caused by compression breaking a large region into chunks */
        entry_total = (sc_fileref->count + sc_sparse->count) - entry_total;
        if (entry_total > r->r_nsubregions)
            printr(r, "range contains %u subregions requires %lu segment commands\n",
                r->r_nsubregions, entry_total);
    }
}
const struct regionop sparse_ops = {
    write_sparse_region,
};
static void
size_vanilla_region(const struct region *r, struct size_core *sc)
{
    assert(0 == r->r_nsubregions);

    const unsigned long count = count_memory_range(R_SIZE(r));
    sc->headersize += sizeof_segment_command() * count;
    sc->count += count;
    sc->memsize += R_SIZE(r);

    if (OPTIONS_DEBUG(opt, 3) && count != 1)
        printr(r, "range with 1 region, but requires %lu segment commands\n", count);
}
const struct regionop vanilla_ops = {
    write_vanilla_region,
};
walk_return_t
region_size_memory(struct region *r, void *arg)
{
    struct size_segment_data *ssd = arg;

    if (&zfod_ops == r->r_op)
        size_zfod_region(r, &ssd->ssd_zfod);
    else if (&fileref_ops == r->r_op)
        size_fileref_region(r, &ssd->ssd_fileref);
    else if (&sparse_ops == r->r_op)
        size_sparse_region(r, &ssd->ssd_sparse, &ssd->ssd_fileref);
    else if (&vanilla_ops == r->r_op)
        size_vanilla_region(r, &ssd->ssd_vanilla);
    else
        errx(EX_SOFTWARE, "%s: bad op", __func__);

    return WALK_CONTINUE;
}