File 0232-erts-Further-optimize-off-heap-traversal-during-mino.patch of Package erlang
From e16e5458c37a649b467980528c3792916a650636 Mon Sep 17 00:00:00 2001
From: Sverker Eriksson <sverker@erlang.org>
Date: Thu, 26 Aug 2021 12:57:53 +0200
Subject: [PATCH 2/2] erts: Further optimize off-heap traversal during minor GC

Only traverse first part of off-heap list that reside on the new-heap,
except for writable binaries that are now kept in a separate list.

Earlier during minor GC, traversing the off-heap list was continued to
the end over terms residing on the old-heap. This for two reasons:

1. The BIN_OLD_VHEAP value was calculated by adding the sizes of all
   ProcBin's on the old-heap.

2. All writable binaries are handled by shrinking (reallocating)
   binaries that have not been written to since last GC. Even binaries
   residing on the old-heap.

To optimize and only traverse the first part the off-heap list that
reside on the new-heap, we now

1. maintain a correct value of BIN_OLD_VHEAP so it does not have to be
   recalculated during GC.

2. keep all writable binaries in a separate list Process.wrt_bins that
   is traversed to the end during every GC.

The aim of this commit has been to maintain the overall GC semantics
and especially not alter the value of BIN_OLD_VHEAP or how binaries
have been reallocated at the end of a GC.
---
 erts/emulator/beam/break.c           |  36 ++-
 erts/emulator/beam/erl_bits.c        |  55 ++++-
 erts/emulator/beam/erl_gc.c          | 355 +++++++++++++++------------
 erts/emulator/beam/erl_message.c     |  11 +-
 erts/emulator/beam/erl_message.h     |   1 +
 erts/emulator/beam/erl_nfunc_sched.h |   3 +
 erts/emulator/beam/erl_nif.c         |   5 +
 erts/emulator/beam/erl_process.c     |   8 +-
 erts/emulator/beam/erl_process.h     |   1 +
 9 files changed, 296 insertions(+), 179 deletions(-)

diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 5527b62211..7c0c6332f5 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -708,25 +708,35 @@ bin_check(void)
 {
     Process *rp;
     struct erl_off_heap_header* hdr;
+    struct erl_off_heap_header* oh_list;
     int i, printed = 0, max = erts_ptab_max(&erts_proc);
+
     for (i=0; i < max; i++) {
         rp = erts_pix2proc(i);
         if (!rp)
             continue;
-        for (hdr = rp->off_heap.first; hdr; hdr = hdr->next) {
-            if (hdr->thing_word == HEADER_PROC_BIN) {
-                ProcBin *bp = (ProcBin*) hdr;
-                if (!printed) {
-                    erts_printf("Process %T holding binary data \n", rp->common.id);
-                    printed = 1;
-                }
-                erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
-                            bp->val,
-                            bp->val->orig_size,
-                            erts_refc_read(&bp->val->intern.refc, 1));
-            }
-        }
+
+        oh_list = rp->off_heap.first;
+        for (;;) {
+            for (hdr = oh_list; hdr; hdr = hdr->next) {
+                if (hdr->thing_word == HEADER_PROC_BIN) {
+                    ProcBin *bp = (ProcBin*) hdr;
+                    if (!printed) {
+                        erts_printf("Process %T holding binary data \n", rp->common.id);
+                        printed = 1;
+                    }
+                    erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
+                                bp->val,
+                                bp->val->orig_size,
+                                erts_refc_read(&bp->val->intern.refc, 1));
+                }
+            }
+            if (oh_list == rp->wrt_bins)
+                break;
+            oh_list = rp->wrt_bins;
+        }
+
         if (printed) {
             erts_printf("--------------------------------------\n");
             printed = 0;
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index eb6ab4a92b..33c66e49b0 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1358,6 +1358,21 @@ erts_new_bs_put_string(ERL_BITS_PROTO_2(byte* iptr, Uint num_bytes))
     erts_bin_offset += num_bytes*8;
 }
 
+static ERTS_INLINE
+void increase_proc_bin_sz(Process* p, ProcBin* pb, Uint new_size)
+{
+    if (new_size > pb->size) {
+        if (ErtsInArea(pb, OLD_HEAP(p), ((OLD_HTOP(p) - OLD_HEAP(p))
+                                         * sizeof(Eterm)))) {
+            BIN_OLD_VHEAP(p) += (new_size / sizeof(Eterm) -
+                                 pb->size / sizeof(Eterm));
+        }
+        pb->size = new_size;
+    }
+    else
+        ASSERT(new_size == pb->size);
+}
+
 Eterm
 erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
                Uint extra_words, Uint unit)
@@ -1445,7 +1460,8 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
 
         used_size_in_bits = erts_bin_offset + build_size_in_bits;
         sb->is_writable = 0; /* Make sure that no one else can write. */
-        pb->size = NBYTES(used_size_in_bits);
+
+        increase_proc_bin_sz(c_p, pb, NBYTES(used_size_in_bits));
         pb->flags |= PB_ACTIVE_WRITER;
 
         /*
@@ -1553,8 +1569,8 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
     hp += PROC_BIN_SIZE;
     pb->thing_word = HEADER_PROC_BIN;
     pb->size = used_size_in_bytes;
-    pb->next = MSO(c_p).first;
-    MSO(c_p).first = (struct erl_off_heap_header*)pb;
+    pb->next = c_p->wrt_bins;
+    c_p->wrt_bins = (struct erl_off_heap_header*)pb;
     pb->val = bptr;
     pb->bytes = (byte*) bptr->orig_bytes;
     pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
@@ -1635,7 +1651,7 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
     }
 
     pos_in_bits_after_build = erts_bin_offset + build_size_in_bits;
-    pb->size = (pos_in_bits_after_build+7) >> 3;
+    increase_proc_bin_sz(p, pb, (pos_in_bits_after_build+7) >> 3);
 
     /*
      * Reallocate the binary if it is too small.
@@ -1661,16 +1677,31 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
              * on. That means that a trace process now has (or have
              * had) a reference to the binary, so we are not allowed
              * to reallocate the binary. Instead, we must allocate a new
-             * binary and copy the contents of the old binary into it.
+             * binary and copy the contents of the old binary into it.
+             *
+             * Also make a new ProcBin as the old one may have been moved
+             * from the 'wrt_bins' list to the regular 'off_heap' list by
+             * the GC. To move it back would mean traversing the off_heap list
+             * from the start. So instead create a new ProcBin for this
+             * (hopefully) rare case.
              */
             Binary* bptr = erts_bin_nrml_alloc(new_size);
-            sys_memcpy(bptr->orig_bytes, binp->orig_bytes, binp->orig_size);
-            pb->val = bptr;
-            pb->bytes = (byte *) bptr->orig_bytes;
-            erts_bin_release(binp);
+            ProcBin* new_pb;
+
+            sys_memcpy(bptr->orig_bytes, binp->orig_bytes, binp->orig_size);
+
+            new_pb = (ProcBin*) HeapFragOnlyAlloc(p, PROC_BIN_SIZE);
+            new_pb->thing_word = HEADER_PROC_BIN;
+            new_pb->size = pb->size;
+            new_pb->val = bptr;
+            new_pb->bytes = (byte *) bptr->orig_bytes;
+            new_pb->next = p->wrt_bins;
+            p->wrt_bins = (struct erl_off_heap_header*) new_pb;
+            sb->orig = make_binary(new_pb);
+            pb = new_pb;
         }
     }
-    pb->flags |= PB_IS_WRITABLE | PB_ACTIVE_WRITER;
+    pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
 
     erts_current_bin = pb->bytes;
 
@@ -1717,8 +1748,8 @@ erts_bs_init_writable(Process* p, Eterm sz)
     hp += PROC_BIN_SIZE;
     pb->thing_word = HEADER_PROC_BIN;
     pb->size = 0;
-    pb->next = MSO(p).first;
-    MSO(p).first = (struct erl_off_heap_header*) pb;
+    pb->next = p->wrt_bins;
+    p->wrt_bins = (struct erl_off_heap_header*) pb;
     pb->val = bptr;
     pb->bytes = (byte*) bptr->orig_bytes;
     pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 55d3379905..5f6eb4410a 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -463,14 +463,11 @@ erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
 static ERTS_INLINE void assert_no_active_writers(Process *p)
 {
 #ifdef DEBUG
-    struct erl_off_heap_header* ptr;
-    ptr = MSO(p).first;
-    while (ptr) {
-        if (ptr->thing_word == HEADER_PROC_BIN) {
-            ProcBin *pbp = (ProcBin*) ptr;
-            ERTS_ASSERT(!(pbp->flags & PB_ACTIVE_WRITER));
-        }
-        ptr = ptr->next;
+    ProcBin *pb = (ProcBin*) p->wrt_bins;
+    while (pb) {
+        ASSERT(pb->thing_word == HEADER_PROC_BIN);
+        ERTS_ASSERT(!(pb->flags & PB_ACTIVE_WRITER));
+        pb = (ProcBin*) pb->next;
     }
 #endif
 }
@@ -1744,7 +1741,7 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
     OLD_HTOP(p) = old_htop;
     HIGH_WATER(p) = n_htop;
 
-    if (MSO(p).first) {
+    if (MSO(p).first || p->wrt_bins) {
         sweep_off_heap(p, 0);
     }
 
@@ -1979,7 +1976,7 @@ full_sweep_heaps(Process *p,
 
     n_htop = sweep_heaps(n_heap, n_htop, oh, oh_size);
 
-    if (MSO(p).first) {
+    if (MSO(p).first || p->wrt_bins) {
         sweep_off_heap(p, 1);
     }
 
@@ -2881,55 +2878,21 @@ next_vheap_size(Process* p, Uint64 vheap, Uint64 vheap_sz) {
     return new_vheap_sz < p->min_vheap_size ?
        p->min_vheap_size : new_vheap_sz;
 }
 
-struct shrink_cand_data {
-    struct erl_off_heap_header* new_candidates;
-    struct erl_off_heap_header* new_candidates_end;
-    struct erl_off_heap_header* old_candidates;
-    Uint no_of_candidates;
-    Uint no_of_active;
-};
-
 static ERTS_INLINE void
-link_live_proc_bin(struct shrink_cand_data *shrink,
-                   struct erl_off_heap_header*** prevppp,
-                   struct erl_off_heap_header** currpp,
-                   int new_heap)
+shrink_writable_bin(ProcBin *pb, Uint leave_unused)
 {
-    ProcBin *pbp = (ProcBin*) *currpp;
-    ASSERT(**prevppp == *currpp);
-
-    *currpp = pbp->next;
-    if (pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE)) {
-        ASSERT(pbp->flags & PB_IS_WRITABLE);
+    Uint new_size = pb->size;
 
-        if (pbp->flags & PB_ACTIVE_WRITER) {
-            pbp->flags &= ~PB_ACTIVE_WRITER;
-            shrink->no_of_active++;
-        }
-        else { /* inactive */
-            Uint unused = pbp->val->orig_size - pbp->size;
-            /* Our allocators are 8 byte aligned, i.e., shrinking with
-               less than 8 bytes will have no real effect */
-            if (unused >= 8) { /* A shrink candidate; save in candidate list */
-                **prevppp = pbp->next;
-                if (new_heap) {
-                    if (!shrink->new_candidates)
-                        shrink->new_candidates_end = (struct erl_off_heap_header*)pbp;
-                    pbp->next = shrink->new_candidates;
-                    shrink->new_candidates = (struct erl_off_heap_header*)pbp;
-                }
-                else {
-                    pbp->next = shrink->old_candidates;
-                    shrink->old_candidates = (struct erl_off_heap_header*)pbp;
-                }
-                shrink->no_of_candidates++;
-                return;
-            }
-        }
+    if (leave_unused) {
+        new_size += (new_size * 100) / leave_unused;
+        /* Our allocators are 8 byte aligned, i.e., shrinking with
+           less than 8 bytes will have no real effect */
+        if (new_size + 8 >= pb->val->orig_size)
+            return;
     }
-
-    /* Not a shrink candidate; keep in original mso list */
-    *prevppp = &pbp->next;
+    ASSERT(erts_refc_read(&pb->val->intern.refc, 1) == 1);
+    pb->val = erts_bin_realloc(pb->val, new_size);
+    pb->bytes = (byte *) pb->val->orig_bytes;
 }
 
 #ifdef ERTS_MAGIC_REF_THING_HEADER
@@ -2946,23 +2909,26 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
 static void
 sweep_off_heap(Process *p, int fullsweep)
 {
-    struct shrink_cand_data shrink = {0};
     struct erl_off_heap_header* ptr;
     struct erl_off_heap_header** prev;
+    struct erl_off_heap_header** insert_old_here;
     char* oheap = NULL;
    Uint oheap_sz = 0;
     Uint64 bin_vheap = 0;
 #ifdef DEBUG
+    Uint64 orig_bin_old_vheap = BIN_OLD_VHEAP(p);
     int seen_mature = 0;
 #endif
+    Uint shrink_ncandidates;
+    Uint shrink_nactive;
+    ProcBin* shrink_unresolved_end;
+    ProcBin* pb;
 
     if (fullsweep == 0) {
         oheap = (char *) OLD_HEAP(p);
         oheap_sz = (char *) OLD_HEND(p) - oheap;
     }
 
-    BIN_OLD_VHEAP(p) = 0;
-
     prev = &MSO(p).first;
     ptr = MSO(p).first;
 
@@ -2986,9 +2952,9 @@ sweep_off_heap(Process *p, int fullsweep)
                 if (to_new_heap) {
                     bin_vheap += ptr->size / sizeof(Eterm);
                 } else {
-                    BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
-                }
-                link_live_proc_bin(&shrink, &prev, &ptr, to_new_heap);
+                    BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm);
+                }
+                ASSERT(!(((ProcBin*)ptr)->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE)));
                 break;
             }
             case ERTS_USED_MAGIC_REF_THING_HEADER__: {
@@ -3009,19 +2975,24 @@ sweep_off_heap(Process *p, int fullsweep)
                                                  make_boxed(&ptr->thing_word),
                                                  ERL_NODE_INC, __FILE__, __LINE__);
                 }
-                prev = &ptr->next;
-                ptr = ptr->next;
             }
+            prev = &ptr->next;
+            ptr = ptr->next;
         }
-        else if (ErtsInArea(ptr, oheap, oheap_sz))
-            break; /* and let old-heap loop continue */
+        else if (ErtsInArea(ptr, oheap, oheap_sz)) {
+            /*
+             * The rest of the list resides on the old heap and needs no
+             * attention during a minor gc.
+             */
+            ASSERT(!fullsweep);
+            break;
+        }
         else {
            /* garbage */
            switch (thing_subtag(ptr->thing_word)) {
            case REFC_BINARY_SUBTAG:
                {
-                    Binary* bptr = ((ProcBin*)ptr)->val;
-                    erts_bin_release(bptr);
+                    erts_bin_release(((ProcBin*)ptr)->val);
                    break;
                }
            case FUN_SUBTAG:
@@ -3049,94 +3020,163 @@ sweep_off_heap(Process *p, int fullsweep)
         }
     }
 
-    /* The rest of the list resides on old-heap, and we just did a
-     * generational collection - keep objects in list.
-     */
-    while (ptr) {
-        ASSERT(ErtsInArea(ptr, oheap, oheap_sz));
-        ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
-        switch (ptr->thing_word) {
-        case HEADER_PROC_BIN:
-            BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
-            link_live_proc_bin(&shrink, &prev, &ptr, 0);
-            break;
-        case ERTS_USED_MAGIC_REF_THING_HEADER__:
-            ASSERT(is_magic_ref_thing(ptr));
-            BIN_OLD_VHEAP(p) +=
-                (((Uint) ((ErtsMRefThing *) ptr)->mb->orig_size)
-                 / sizeof(Eterm)); /* for binary gc (words)*/
-            /* fall through... */
-        default:
-            ASSERT(is_fun_header(ptr->thing_word) ||
-                   is_external_header(ptr->thing_word)
-                   || is_magic_ref_thing(ptr));
-            prev = &ptr->next;
-            ptr = ptr->next;
-            break;
-        }
-    }
+    insert_old_here = prev;
 
+#ifdef DEBUG
     if (fullsweep) {
-        BIN_OLD_VHEAP_SZ(p) = next_vheap_size(p, BIN_OLD_VHEAP(p) + MSO(p).overhead, BIN_OLD_VHEAP_SZ(p));
+        ASSERT(ptr == NULL);
+        ASSERT(BIN_OLD_VHEAP(p) == orig_bin_old_vheap);
     }
-    BIN_VHEAP_SZ(p) = next_vheap_size(p, bin_vheap, BIN_VHEAP_SZ(p));
-    MSO(p).overhead = bin_vheap;
+    else {
+        /* The rest of the list resides on the old heap and needs no
+         * attention during a minor gc. In a DEBUG build, verify
+         * that the binaries in the list are not writable and that
+         * the other terms are of the allowed types.
+         */
+        while (ptr) {
+            ASSERT(ErtsInArea(ptr, oheap, oheap_sz));
+            ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
+            switch (ptr->thing_word) {
+            case HEADER_PROC_BIN:
+                ASSERT(!(((ProcBin*)ptr)->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE)));
+                break;
+            default:
+                ASSERT(is_fun_header(ptr->thing_word) ||
+                       is_external_header(ptr->thing_word) ||
+                       is_magic_ref_thing(ptr));
+                break;
+            }
+            ptr = ptr->next;
+        }
+    }
+#endif /* DEBUG */
 
     /*
-     * If we got any shrink candidates, check them out.
+     * Traverse writable binaries.
+     * As writable binaries may reside on the old heap we traverse
+     * the entire wrt_bins list even during minor gc.
      */
+    shrink_nactive = 0;           /* number of active writable binaries */
+    shrink_ncandidates = 0;       /* number of candidates for shrinking */
+    shrink_unresolved_end = NULL; /* end marker for second traversal */
+
+    pb = (ProcBin*) p->wrt_bins;
+    prev = &p->wrt_bins;
+    while (pb) {
+        int on_old_heap;
+        if (IS_MOVED_BOXED(pb->thing_word)) {
+            ASSERT(!ErtsInArea(pb, oheap, oheap_sz));
+            pb = (ProcBin*) boxed_val(pb->thing_word);
+            *prev = (struct erl_off_heap_header*) pb;
+            ASSERT(pb->thing_word == HEADER_PROC_BIN);
+            on_old_heap = ErtsInArea(pb, oheap, oheap_sz);
+            if (!on_old_heap) {
+                bin_vheap += pb->size / sizeof(Eterm);
+            } else {
+                BIN_OLD_VHEAP(p) += pb->size / sizeof(Eterm);
+            }
+        }
+        else {
+            ASSERT(pb->thing_word == HEADER_PROC_BIN);
+            on_old_heap = ErtsInArea(pb, oheap, oheap_sz);
+            if (!on_old_heap) {
+                /* garbage */
+                erts_bin_release(pb->val);
+                pb = (ProcBin*) pb->next;
+                *prev = (struct erl_off_heap_header*) pb;
+                continue;
+            }
+        }
+        if (pb->flags) {
+            ASSERT(pb->flags & PB_IS_WRITABLE);
 
-    if (shrink.no_of_candidates) {
-        ProcBin *candlist[] = { (ProcBin*)shrink.new_candidates,
-                                (ProcBin*)shrink.old_candidates };
+            /*
+             * How to shrink writable binaries. There are two distinct cases:
+             *
+             * + There are one or more active writers. We will shrink all
+             *   writable binaries without active writers down to their
+             *   original sizes.
+             *
+             * + There are no active writers. We will shrink all writable
+             *   binaries, but not fully. How much margin we will leave
+             *   depends on the number of writable binaries.
+             *
+             * That is, we don't know how to shrink the binaries before either
+             * + finding the first active writer, or
+             * + finding more than ERTS_INACT_WR_PB_LEAVE_LIMIT
+             *   shrink candidates
+             */
+
+            if (pb->flags & PB_ACTIVE_WRITER) {
+                pb->flags &= ~PB_ACTIVE_WRITER;
+                shrink_nactive++;
+                if (!shrink_unresolved_end)
+                    shrink_unresolved_end = pb;
+            }
+            else { /* inactive */
+                Uint unused = pb->val->orig_size - pb->size;
+                /* Our allocators are 8 byte aligned, i.e., shrinking with
+                   less than 8 bytes will have no real effect */
+                if (unused >= 8) { /* A shrink candidate */
+                    if (shrink_unresolved_end) {
+                        shrink_writable_bin(pb, 0);
+                    }
+                    else if (++shrink_ncandidates > ERTS_INACT_WR_PB_LEAVE_LIMIT) {
+                        shrink_unresolved_end = pb;
+                        shrink_writable_bin(pb, 0);
+                    }
+                    /* else unresolved, handle in second traversal below */
+                }
+            }
+            prev = &pb->next;
+            pb = (ProcBin*) pb->next;
+        }
+        else { /* emasculated, move to regular off-heap list */
+            struct erl_off_heap_header* next = pb->next;
+            if (on_old_heap) {
+                pb->next = *insert_old_here;
+                *insert_old_here = (struct erl_off_heap_header*)pb;
+            }
+            else {
+                pb->next = p->off_heap.first;
+                p->off_heap.first = (struct erl_off_heap_header*)pb;
+                if (insert_old_here == &p->off_heap.first)
+                    insert_old_here = &pb->next;
+            }
+            pb = (ProcBin*) next;
+            *prev = next;
+        }
+    }
+
+    /*
+     * Handle any unresolved shrink candidates left at the head of wrt_bins.
+     */
+    if (shrink_ncandidates) {
         Uint leave_unused = 0;
-        int i;
 
-        if (shrink.no_of_active == 0) {
-            if (shrink.no_of_candidates <= ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT)
+        if (shrink_nactive == 0) {
+            if (shrink_ncandidates <= ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT)
                 leave_unused = ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE;
-            else if (shrink.no_of_candidates <= ERTS_INACT_WR_PB_LEAVE_LIMIT)
-                leave_unused = ERTS_INACT_WR_PB_LEAVE_PERCENTAGE;
+            else if (shrink_ncandidates <= ERTS_INACT_WR_PB_LEAVE_LIMIT)
+                leave_unused = ERTS_INACT_WR_PB_LEAVE_PERCENTAGE;
         }
 
-        for (i = 0; i < sizeof(candlist)/sizeof(candlist[0]); i++) {
-            ProcBin* pb;
-            for (pb = candlist[i]; pb; pb = (ProcBin*)pb->next) {
-                Uint new_size = pb->size;
-
-                if (leave_unused) {
-                    new_size += (new_size * 100) / leave_unused;
-                    /* Our allocators are 8 byte aligned, i.e., shrinking with
-                       less than 8 bytes will have no real effect */
-                    if (new_size + 8 >= pb->val->orig_size)
-                        continue;
-                }
-
-                pb->val = erts_bin_realloc(pb->val, new_size);
-                pb->bytes = (byte *) pb->val->orig_bytes;
-            }
+        for (pb = (ProcBin *)p->wrt_bins;
+             pb != shrink_unresolved_end;
+             pb = (ProcBin *)pb->next) {
+            ASSERT(pb);
+            ASSERT(pb->flags == PB_IS_WRITABLE);
+            shrink_writable_bin(pb, leave_unused);
         }
+    }
 
-
-    /*
-     * We now potentially have the mso list divided into three lists:
-     * - shrink candidates on new heap (inactive writable with unused data)
-     * - shrink candidates on old heap (inactive writable with unused data)
-     * - other binaries (read only + active writable ...) + funs and externals
-     *
-     * Put them back together: new candidates -> other -> old candidates
-     * This order will ensure that the list only refers from new
-     * generation to old and never from old to new *which is important*.
-     */
-    if (shrink.new_candidates) {
-        if (prev == &MSO(p).first) /* empty other binaries list */
-            prev = &shrink.new_candidates_end->next;
-        else
-            shrink.new_candidates_end->next = MSO(p).first;
-        MSO(p).first = shrink.new_candidates;
-    }
+    if (fullsweep) {
+        ASSERT(BIN_OLD_VHEAP(p) == orig_bin_old_vheap);
+        BIN_OLD_VHEAP(p) = 0;
+        BIN_OLD_VHEAP_SZ(p) = next_vheap_size(p, MSO(p).overhead, BIN_OLD_VHEAP_SZ(p));
     }
-    *prev = shrink.old_candidates;
+    BIN_VHEAP_SZ(p) = next_vheap_size(p, bin_vheap, BIN_VHEAP_SZ(p));
+    MSO(p).overhead = bin_vheap;
 }
 
 /*
@@ -3282,6 +3322,10 @@ offset_off_heap(Process* p, Sint offs, char* area, Uint area_size)
         Eterm** uptr = (Eterm**) (void *) &MSO(p).first;
         *uptr += offs;
     }
+    if (p->wrt_bins && ErtsInArea(p->wrt_bins, area, area_size)) {
+        Eterm** uptr = (Eterm**) (void *) &p->wrt_bins;
+        *uptr += offs;
+    }
 }
 
 #ifndef USE_VM_PROBES
@@ -3841,11 +3885,13 @@ erts_check_off_heap2(Process *p, Eterm *htop)
 {
     Eterm *oheap = (Eterm *) OLD_HEAP(p);
     Eterm *ohtop = (Eterm *) OLD_HTOP(p);
-    int old;
+    enum { NEW_PART, OLD_PART, WRT_BIN_PART} part;
     union erl_off_heap_ptr u;
 
-    old = 0;
-    for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next) {
+    part = NEW_PART;
+    u.hdr = MSO(p).first;
+repeat:
+    for (; u.hdr; u.hdr = u.hdr->next) {
        erts_aint_t refc;
        switch (thing_subtag(u.hdr->thing_word)) {
        case REFC_BINARY_SUBTAG:
@@ -3871,19 +3917,26 @@ erts_check_off_heap2(Process *p, Eterm *htop)
        ERTS_CHK_OFFHEAP_ASSERT(!(u.hdr->thing_word & ERTS_OFFHEAP_VISITED_BIT));
        u.hdr->thing_word |= ERTS_OFFHEAP_VISITED_BIT;
 #endif
-        if (old) {
-            ERTS_CHK_OFFHEAP_ASSERT(oheap <= u.ep && u.ep < ohtop);
-        }
-        else if (oheap <= u.ep && u.ep < ohtop)
-            old = 1;
-        else {
-            ERTS_CHK_OFFHEAP_ASSERT(erts_dbg_within_proc(u.ep, p, htop));
-        }
+        if (part == OLD_PART)
+            ERTS_CHK_OFFHEAP_ASSERT(oheap <= u.ep && u.ep < ohtop);
+        else if (part == NEW_PART && oheap <= u.ep && u.ep < ohtop)
+            part = OLD_PART;
+        else
+            ERTS_CHK_OFFHEAP_ASSERT(erts_dbg_within_proc(u.ep, p, htop));
     }
+    if (part != WRT_BIN_PART) {
+        part = WRT_BIN_PART;
+        u.hdr = p->wrt_bins;
+        goto repeat;
+    }
+
+
 #ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_LIST
     for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next)
        u.hdr->thing_word &= ~ERTS_OFFHEAP_VISITED_BIT;
+    for (u.hdr = p->wrt_bins; u.hdr; u.hdr = u.hdr->next)
+       u.hdr->thing_word &= ~ERTS_OFFHEAP_VISITED_BIT;
 #endif
 }
 
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 86b1b6f6a4..1c61d4ca74 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -161,11 +161,11 @@ erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
 
 
 void
-erts_cleanup_offheap(ErlOffHeap *offheap)
+erts_cleanup_offheap_list(struct erl_off_heap_header* first)
 {
     union erl_off_heap_ptr u;
 
-    for (u.hdr = offheap->first; u.hdr; u.hdr = u.hdr->next) {
+    for (u.hdr = first; u.hdr; u.hdr = u.hdr->next) {
        switch (thing_subtag(u.hdr->thing_word)) {
        case REFC_BINARY_SUBTAG:
            erts_bin_release(u.pb->val);
@@ -187,6 +187,13 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
     }
 }
 
+void
+erts_cleanup_offheap(ErlOffHeap *offheap)
+{
+    erts_cleanup_offheap_list(offheap->first);
+}
+
+
 void
 free_message_buffer(ErlHeapFragment* bp)
 {
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index a4a4ec29db..ab44cbf69d 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -485,6 +485,7 @@ void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
 
 Uint erts_msg_attached_data_size_aux(ErtsMessage *msg);
 
+void erts_cleanup_offheap_list(struct erl_off_heap_header* first);
 void erts_cleanup_offheap(ErlOffHeap *offheap);
 void erts_save_message_in_proc(Process *p, ErtsMessage *msg);
 Sint erts_move_messages_off_heap(Process *c_p);
diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h
index bd787f5f77..cbe37840e3 100644
--- a/erts/emulator/beam/erl_nfunc_sched.h
+++ b/erts/emulator/beam/erl_nfunc_sched.h
@@ -248,6 +248,8 @@ erts_flush_dirty_shadow_proc(Process *sproc)
     }
 
     c_p->off_heap.overhead += sproc->off_heap.overhead;
+
+    ASSERT(sproc->wrt_bins == NULL);
 }
 
 ERTS_GLB_INLINE void
@@ -272,6 +274,7 @@ erts_cache_dirty_shadow_proc(Process *sproc)
     sproc->mbuf = NULL;
     sproc->mbuf_sz = 0;
     ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+    sproc->wrt_bins = NULL;
 }
 
 ERTS_GLB_INLINE Process *
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index bec276a393..2b73537b2e 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -643,6 +643,7 @@ void enif_clear_env(ErlNifEnv* env)
     menv->env.hp = menv->env.hp_end = HEAP_TOP(p);
 
     ASSERT(!is_offheap(&MSO(p)));
+    ASSERT(!p->wrt_bins);
 }
 
 #ifdef DEBUG
@@ -5042,6 +5043,9 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
        ErlHeapFragment *orig_hf = MBUF(p);
        ErlOffHeap orig_oh = MSO(p);
        Eterm *orig_htop = HEAP_TOP(p);
+#ifdef DEBUG
+        struct erl_off_heap_header* orig_wrt_bins = p->wrt_bins;
+#endif
        ASSERT(is_internal_pid(p->common.id));
        MBUF(p) = NULL;
        clear_offheap(&MSO(p));
@@ -5067,6 +5071,7 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
        MBUF(p) = orig_hf;
        MSO(p) = orig_oh;
        HEAP_TOP(p) = orig_htop;
+        ASSERT(p->wrt_bins == orig_wrt_bins);
     }
     else {
        /* Nif call was done without a process context, so we create a phony one. */
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 6b0bebff6f..e230427cdc 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -12021,6 +12021,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). */
 
     p->off_heap.first = NULL;
     p->off_heap.overhead = 0;
+    p->wrt_bins = NULL;
 
     if (is_not_immed(group_leader))
        heap_need += NC_HEAP_SIZE(group_leader);
@@ -12597,6 +12598,7 @@ void erts_init_empty_process(Process *p)
     p->next = NULL;
     p->off_heap.first = NULL;
     p->off_heap.overhead = 0;
+    p->wrt_bins = NULL;
     p->common.u.alive.reg = NULL;
     p->heap_sz = 0;
     p->high_water = NULL;
@@ -12719,6 +12721,7 @@ erts_debug_verify_clean_empty_process(Process* p)
 
     ASSERT(p->off_heap.first == NULL);
     ASSERT(p->off_heap.overhead == 0);
+    ASSERT(p->wrt_bins == NULL);
 
     ASSERT(p->mbuf == NULL);
 }
@@ -12730,9 +12733,11 @@ erts_cleanup_empty_process(Process* p)
 {
     /* We only check fields that are known to be used... */
 
-    erts_cleanup_offheap(&p->off_heap);
+    erts_cleanup_offheap_list(p->off_heap.first);
     p->off_heap.first = NULL;
     p->off_heap.overhead = 0;
+    erts_cleanup_offheap_list(p->wrt_bins);
+    p->wrt_bins = NULL;
 
     if (p->mbuf != NULL) {
        free_message_buffer(p->mbuf);
@@ -12786,6 +12791,7 @@ delete_process(Process* p)
 
     /* Clean binaries and funs */
     erts_cleanup_offheap(&p->off_heap);
+    erts_cleanup_offheap_list(p->wrt_bins);
 
     /*
      * The mso list should not be used anymore, but if it is, make sure that
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 6509edf947..ba57b19f2f 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1101,6 +1101,7 @@ struct process {
     Uint16 gen_gcs;		/* Number of (minor) generational GCs. */
     Uint16 max_gen_gcs;		/* Max minor gen GCs before fullsweep. */
     ErlOffHeap off_heap;	/* Off-heap data updated by copy_struct(). */
+    struct erl_off_heap_header* wrt_bins; /* Writable binaries */
     ErlHeapFragment* mbuf;	/* Pointer to heap fragment list */
     ErlHeapFragment* live_hf_end;
     ErtsMessage *msg_frag;	/* Pointer to message fragment list */
-- 
2.31.1
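The core idea of the patch is easier to see in a toy model of the two lists. The sketch below is illustrative only and not ERTS code: it assumes a simplified node type where the heap area and writable flag are plain ints, and it only demonstrates why a minor sweep can stop at the first old-heap node of the ordinary off-heap list (which is ordered young-to-old) while always walking the separate writable-binaries list to its end.

#include <stdio.h>
#include <stddef.h>

struct oh_node {            /* hypothetical stand-in for erl_off_heap_header */
    struct oh_node *next;
    int in_old_area;        /* stand-in for ErtsInArea(ptr, oheap, oheap_sz) */
    int writable;           /* stand-in for PB_IS_WRITABLE */
};

/* Minor sweep: the ordinary off-heap list is ordered young-to-old, so the
 * first node found in the old area means the rest of the list is old too
 * and needs no attention. */
static void minor_sweep(struct oh_node *off_heap_first, struct oh_node *wrt_bins)
{
    struct oh_node *p;

    for (p = off_heap_first; p && !p->in_old_area; p = p->next)
        printf("visit young off-heap node %p\n", (void *) p);

    /* Writable binaries may sit on the old area as well, so this separate
     * list is always traversed to the end; it is expected to stay short. */
    for (p = wrt_bins; p; p = p->next)
        printf("visit writable binary %p (old=%d)\n", (void *) p, p->in_old_area);
}

int main(void)
{
    struct oh_node old2  = { NULL,  1, 0 };
    struct oh_node old1  = { &old2, 1, 0 };
    struct oh_node young = { &old1, 0, 0 };   /* young-to-old ordering */
    struct oh_node wb    = { NULL,  1, 1 };   /* writable binary on the old area */

    minor_sweep(&young, &wb);
    return 0;
}

The young-to-old ordering of the main list is what makes the early break safe; keeping writable binaries in their own list gives up that early exit only for the (normally few) binaries that still need per-GC shrinking.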