mirror of https://git.savannah.gnu.org/git/guile.git
64KB segment alignment
* module/system/vm/linker.scm (*lcm-page-size*): Rename from
  *page-size*; change to 64 KB.
* libguile/loader.c (load_thunk_from_memory): Only require page-size
  alignment, knowing that although Guile might emit ELF with 64 KB
  alignment, it only really needs page alignment.
parent 8157c2a3ac
commit c62f0b0256
2 changed files with 50 additions and 4 deletions
libguile/loader.c

@@ -420,7 +420,18 @@ load_thunk_from_memory (char *data, size_t len, int is_read_only)
   if (dynamic_segment < 0)
     ABORT ("no PT_DYNAMIC segment");
 
-  if (!IS_ALIGNED ((scm_t_uintptr) data, alignment))
+  /* The ELF images that Guile currently emits have segments that are
+     aligned on 64 KB boundaries, which might be larger than the actual
+     page size (usually 4 KB).  However Guile doesn't actually use the
+     absolute addresses at all.  All Guile needs is for the loaded image
+     to be able to make the data section writable (for the mmap path),
+     and for that the segment just needs to be page-aligned, and a page
+     is always bigger than Guile's minimum alignment.  Since we know
+     (for the mmap path) that the base _is_ page-aligned, we proceed
+     ahead even if the image alignment is greater than the page
+     size.  */
+  if (!IS_ALIGNED ((scm_t_uintptr) data, alignment)
+      && !IS_ALIGNED (alignment, page_size))
     ABORT ("incorrectly aligned base");
 
   /* Allow writes to writable pages.  */
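As an aside for readers skimming the hunk: the new two-part test accepts an image whenever the ELF alignment is itself a whole number of pages, since a page-aligned mmap base then satisfies everything Guile needs. Below is a minimal standalone sketch of that logic, assuming the usual power-of-two mask definition of IS_ALIGNED; the concrete values are illustrative, not Guile's actual configuration.

#include <assert.h>
#include <stdint.h>

/* True if ADDR is a multiple of the power-of-two ALIGN.  */
#define IS_ALIGNED(addr, align) (((addr) & ((align) - 1)) == 0)

static int
base_alignment_ok (uintptr_t data, uintptr_t alignment, uintptr_t page_size)
{
  /* Accept the image if the base already satisfies the ELF alignment,
     or if the ELF alignment is a whole number of pages: in the latter
     case a page-aligned mmap base is all that is needed.  */
  return IS_ALIGNED (data, alignment)
    || IS_ALIGNED (alignment, page_size);
}

int
main (void)
{
  /* 64 KB ELF alignment, assumed 4 KB pages: a page-aligned base
     passes even though it is not 64 KB-aligned.  */
  assert (base_alignment_ok (0x1000, 1 << 16, 4096));
  /* An unaligned base with sub-page (8-byte) alignment still fails.  */
  assert (!base_alignment_ok (0x1003, 8, 4096));
  return 0;
}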
@@ -433,7 +444,7 @@ load_thunk_from_memory (char *data, size_t len, int is_read_only)
         continue;
       if (ph[i].p_flags == PF_R)
         continue;
-      if (ph[i].p_align != page_size)
+      if (ph[i].p_align < page_size)
         continue;
 
       if (mprotect (data + ph[i].p_vaddr,
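The guard here loosens from `!= page_size' to `< page_size': mprotect operates on whole pages, so any segment aligned to at least one page starts on a page boundary and can be made writable, including the new 64 KB-aligned ones. A small sketch of the predicate, under an assumed 4 KB host page size:

#include <stdio.h>

static int
can_mprotect_segment (unsigned long p_align, unsigned long page_size)
{
  /* Old test (p_align != page_size) skipped 64 KB-aligned segments;
     the new test only skips segments with sub-page alignment.  */
  return p_align >= page_size;
}

int
main (void)
{
  unsigned long page_size = 4096;  /* assumed host page size */
  printf ("4 KB-aligned:   %d\n", can_mprotect_segment (1UL << 12, page_size));
  printf ("64 KB-aligned:  %d\n", can_mprotect_segment (1UL << 16, page_size));
  printf ("8-byte-aligned: %d\n", can_mprotect_segment (8, page_size));
  return 0;
}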
module/system/vm/linker.scm

@@ -317,7 +317,42 @@ segment, the order of the linker objects is preserved."
               #:addralign (elf-section-addralign sec)
               #:entsize (elf-section-entsize sec)))
 
-(define *page-size* 4096)
+;; We assume that 64K is a multiple of the page size.  A
+;; least-common-multiple, if you will.
+;;
+;; It would be possible to choose smaller, target-specific page sizes.
+;; This is still a little tricky; on amd64 for example, systems commonly
+;; have 4KB pages, but they are allowed by the ABI to have any
+;; multiple-of-2 page size up to 64 KB.  On Cygwin, pages are 4kB but
+;; they can only be allocated 16 at a time.  MIPS and ARM64 can use 64K
+;; pages too and that's not uncommon.
+;;
+;; At the current time, in Guile we would like to reduce the number of
+;; binaries we ship to the existing 32-or-64-bit and
+;; big-or-little-endian variants, if possible.  It would seem that with
+;; the least-common-multiple of 64 KB pages, we can do that.
+;;
+;; See https://github.com/golang/go/issues/10180 for a discussion of
+;; this issue in the Go context.
+;;
+;; Using 64KB instead of the more usual 4KB will increase the size of
+;; our .go files, but not the prebuilt/ part of the tarball as that part
+;; of the file will be zeroes and compress well.  Additionally on a
+;; system with 4KB pages, the extra padding will never be paged in, nor
+;; read from disk (though it causes more seeking etc so on spinning
+;; metal it's a bit of a lose).
+;;
+;; By way of comparison, on many 64-bit platforms, binutils currently
+;; defaults to aligning segments on 2MB boundaries.  It does so by
+;; making the file and the memory images not the same: the pages are all
+;; together on disk, but then when loading, the loader will mmap a
+;; region "memsz" large which might be greater than the file size, then
+;; map segments into that region.  We can avoid this complication for
+;; now.  We can consider adding it in the future in a compatible way in
+;; 2.2 if it is important.
+;;
+(define *lcm-page-size* (ash 1 16))
 
 (define (add-symbols symbols offset symtab)
   "Add @var{symbols} to the symbol table @var{symtab}, relocating them
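The arithmetic behind the comment is easy to check: (ash 1 16) is 65536 bytes, and 64 KB is a common multiple of each page size the comment mentions, so a single segment alignment covers all of them. A quick numeric check, with the page-size list as an illustrative assumption:

#include <assert.h>

int
main (void)
{
  unsigned long lcm_page_size = 1UL << 16;  /* 65536, i.e. (ash 1 16) */
  /* Assumed set of page sizes to cover: 4 KB, 16 KB, 64 KB.  */
  unsigned long page_sizes[] = { 4096, 16384, 65536 };
  for (unsigned i = 0; i < sizeof page_sizes / sizeof page_sizes[0]; i++)
    /* 64 KB is a whole multiple of each, so a 64 KB-aligned segment is
       page-aligned on every such system.  */
    assert (lcm_page_size % page_sizes[i] == 0);
  return 0;
}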
@@ -631,7 +666,7 @@ relocated headers, and the global symbol table."
                                  ;; loadable segments to share pages
                                  ;; with PF_R segments.
                                  (not (and (not type) (= PF_R prev-flags))))
-                             *page-size*
+                             *lcm-page-size*
                              8))
               (lp seglists
                   (fold-values cons objs-out objects)
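For context on what this rename feeds into: when the layout loop starts a new loadable segment it rounds the running address up to *lcm-page-size*, and otherwise only to 8 bytes. A sketch of that align-up step, assuming power-of-two alignments:

#include <stdio.h>

static unsigned long
align_up (unsigned long addr, unsigned long align)
{
  /* Round ADDR up to the next multiple of the power-of-two ALIGN.  */
  return (addr + align - 1) & ~(align - 1);
}

int
main (void)
{
  unsigned long lcm_page_size = 1UL << 16;
  /* New segment: jump to the next 64 KB boundary.  */
  printf ("%#lx\n", align_up (0x12345, lcm_page_size));  /* 0x20000 */
  /* Within a segment: 8-byte alignment suffices.  */
  printf ("%#lx\n", align_up (0x12345, 8));              /* 0x12348 */
  return 0;
}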