#if SU_DEBUG
# define SU_D(X) STMT_START X STMT_END
/* Debug logger added by this patch: forwards a printf-style message to
 * Perl_debug_log, but only when the package's $DEBUG variable (looked up
 * via get_sv) is true. Centralizes the SvTRUE($DEBUG) guard that call
 * sites previously had to wrap around PerlIO_printf themselves. */
+static void su_debug_log(const char *fmt, ...) {
+ va_list va;
+ SV *sv;
+ dTHX;
+ va_start(va, fmt);
+ sv = get_sv(__PACKAGE__ "::DEBUG", 0);
+ if (sv && SvTRUE(sv))
+ PerlIO_vprintf(Perl_debug_log, fmt, va);
+ va_end(va);
+ return;
+}
#else
# define SU_D(X)
#endif
# define MY_CXT_CLONE NOOP
#endif
+/* --- Error messages ------------------------------------------------------ */
+
+static const char su_stack_smash[] = "Cannot target a scope outside of the current stack";
+static const char su_no_such_target[] = "No targetable %s scope in the current stack";
+
/* --- Unique context ID global storage ------------------------------------ */
/* ... Sequence ID counter ................................................. */
}
/* Per-interpreter UID storage, indexed by scope depth. This patch flattens
 * the map from an array of su_uid pointers (each individually Newx'd) into
 * a single contiguous array of su_uid structs, removing one level of
 * indirection and per-slot allocations. `used` is the number of live slots,
 * `alloc` the allocated capacity. */
typedef struct {
- su_uid **map;
- STRLEN used;
- STRLEN alloc;
+ su_uid *map;
+ STRLEN used;
+ STRLEN alloc;
} su_uid_storage;
/* Duplicate a UID storage into new_cxt, keeping at most max_depth entries.
 * With the flat-array layout introduced by this patch, an active old slot
 * is copied by plain struct assignment; inactive/extra slots are reset to
 * {seq = 0, flags = 0} instead of being left as NULL pointers. */
static void su_uid_storage_dup(pTHX_ su_uid_storage *new_cxt, const su_uid_storage *old_cxt, UV max_depth) {
#define su_uid_storage_dup(N, O, D) su_uid_storage_dup(aTHX_ (N), (O), (D))
- su_uid **old_map = old_cxt->map;
+ su_uid *old_map = old_cxt->map;
 if (old_map) {
- su_uid **new_map = new_cxt->map;
- STRLEN old_used = old_cxt->used;
- STRLEN new_used, new_alloc;
- STRLEN i;
+ su_uid *new_map = new_cxt->map;
+ STRLEN old_used = old_cxt->used;
+ STRLEN new_used, new_alloc;
+ STRLEN i;
/* Clamp the copied slot count to the caller-supplied depth limit. */
- new_used = max_depth < old_used ? max_depth : old_used;
+ new_used = max_depth < old_used ? max_depth : old_used;
 new_cxt->used = new_used;
/* Grow the destination map only if its current capacity is insufficient.
 * Renew now resizes an array of su_uid values, so the old per-slot
 * NULL-initialization loop is no longer needed. */
- if (new_used <= new_cxt->alloc)
- new_alloc = new_cxt->alloc;
- else {
- new_alloc = new_used;
- Renew(new_map, new_alloc, su_uid *);
- for (i = new_cxt->alloc; i < new_alloc; ++i)
- new_map[i] = NULL;
+ if (new_used <= new_cxt->alloc) {
+ new_alloc = new_cxt->alloc;
+ } else {
+ new_alloc = new_used;
+ Renew(new_map, new_alloc, su_uid);
 new_cxt->map = new_map;
 new_cxt->alloc = new_alloc;
 }
 for (i = 0; i < new_alloc; ++i) {
- su_uid *new_uid = new_map[i];
+ su_uid *new_uid = new_map + i;
 if (i < new_used) { /* => i < max_depth && i < old_used */
- su_uid *old_uid = old_map[i];
+ su_uid *old_uid = old_map + i;
 if (old_uid && (old_uid->flags & SU_UID_ACTIVE)) {
/* Slot exists unconditionally in the flat layout, so the lazy Newx of the
 * destination uid is gone; a struct copy suffices. */
- if (!new_uid) {
- Newx(new_uid, 1, su_uid);
- new_map[i] = new_uid;
- }
 *new_uid = *old_uid;
 continue;
 }
 }
/* Not copied: fully reset the slot rather than just clearing ACTIVE. */
- if (new_uid)
- new_uid->flags &= ~SU_UID_ACTIVE;
+ new_uid->seq = 0;
+ new_uid->flags = 0;
 }
 }
SvREFCNT_dec(si->si_stack);
Safefree(si);
- if (sud->tmp_uid_storage.map) {
- su_uid **map = sud->tmp_uid_storage.map;
- STRLEN alloc = sud->tmp_uid_storage.alloc;
- STRLEN i;
-
- for (i = 0; i < alloc; ++i)
- Safefree(map[i]);
-
- Safefree(map);
- }
+ Safefree(sud->tmp_uid_storage.map);
Safefree(sud);
dSP;
- SU_D({
- PerlIO_printf(Perl_debug_log,
- "@@@ call scope_ix=%2d save_ix=%2d\n",
- PL_scopestack_ix, PL_savestack_ix);
- });
+ SU_D(su_debug_log("@@@ call scope_ix=%2d save_ix=%2d\n",
+ PL_scopestack_ix, PL_savestack_ix));
ENTER;
SAVETMPS;
SU_D({
SV *z = newSV(0);
SvUPGRADE(z, t);
- PerlIO_printf(Perl_debug_log, "%p: === localize a %s\n",ud, sv_reftype(z, 0));
- PerlIO_printf(Perl_debug_log,
- "%p: depth=%2d scope_ix=%2d save_ix=%2d\n",
- ud, SU_UD_DEPTH(ud), PL_scopestack_ix, PL_savestack_ix);
+ su_debug_log("%p: === localize a %s\n",ud, sv_reftype(z, 0));
+ su_debug_log("%p: depth=%2d scope_ix=%2d save_ix=%2d\n",
+ ud, SU_UD_DEPTH(ud), PL_scopestack_ix, PL_savestack_ix);
SvREFCNT_dec(z);
});
/* ... Unique context ID ................................................... */
+/* We must pass the index because MY_CXT.uid_storage might be reallocated
+ * between the UID fetch and the invalidation at the end of scope. */
+
/* Destructor userdata for a scope UID. The patch stores the slot index
 * instead of a su_uid pointer because MY_CXT.uid_storage.map may be
 * Renew'd (reallocated) between the UID fetch and the end-of-scope
 * invalidation, which would leave a raw pointer dangling. */
typedef struct {
 su_ud_common ci;
- su_uid *uid;
+ I32 idx;
} su_ud_uid;
/* SU_UD_UID_UID is removed: su_uid_drop now receives the whole su_ud_uid. */
-#define SU_UD_UID_UID(U) (((su_ud_uid *) (U))->uid)
-
/* End-of-scope callback: re-resolve the slot through the (possibly moved)
 * map, clear its ACTIVE flag, and free the userdata here instead of at the
 * SAVEDESTRUCTOR_X push site. */
static void su_uid_drop(pTHX_ void *ud_) {
- su_uid *uid = ud_;
+ su_ud_uid *ud = ud_;
+ dMY_CXT;
- uid->flags &= ~SU_UID_ACTIVE;
+ MY_CXT.uid_storage.map[ud->idx].flags &= ~SU_UID_ACTIVE;
+
+ SU_UD_FREE(ud);
 return;
}
I32 depth, base, mark, *origin;
depth = SU_UD_DEPTH(ud);
- SU_D(
- PerlIO_printf(Perl_debug_log,
- "%p: --- pop a %s\n"
- "%p: leave scope at depth=%2d scope_ix=%2d cur_top=%2d cur_base=%2d\n",
- ud, SU_CXNAME(cxstack + cxstack_ix),
- ud, depth, PL_scopestack_ix,PL_savestack_ix,PL_scopestack[PL_scopestack_ix])
- );
+ SU_D(su_debug_log(
+ "%p: --- pop a %s\n"
+ "%p: leave scope at depth=%2d scope_ix=%2d cur_top=%2d cur_base=%2d\n",
+ ud, SU_CXNAME(cxstack + cxstack_ix),
+ ud, depth, PL_scopestack_ix,PL_savestack_ix,PL_scopestack[PL_scopestack_ix]
+ ));
origin = SU_UD_ORIGIN(ud);
mark = origin[depth];
base = origin[depth - 1];
- SU_D(PerlIO_printf(Perl_debug_log,
- "%p: original scope was %*c top=%2d base=%2d\n",
- ud, 24, ' ', mark, base));
+ SU_D(su_debug_log("%p: original scope was %*c top=%2d base=%2d\n",
+ ud, 24, ' ', mark, base));
if (base < mark) {
#if SU_HAS_PERL(5, 19, 4)
PERL_CONTEXT *cx;
#endif
- SU_D(PerlIO_printf(Perl_debug_log, "%p: clear leftovers\n", ud));
+ SU_D(su_debug_log("%p: clear leftovers\n", ud));
#if SU_HAS_PERL(5, 19, 4)
cx = cxstack + cxstack_ix;
if ((pad = SU_UD_PAD(ud)) > 0) {
dMY_CXT;
do {
- SU_D(PerlIO_printf(Perl_debug_log,
+ SU_D(su_debug_log(
"%p: push a pad slot at depth=%2d scope_ix=%2d save_ix=%2d\n",
ud, depth, PL_scopestack_ix, PL_savestack_ix));
SU_SAVE_PLACEHOLDER();
} while (--pad);
}
- SU_D(PerlIO_printf(Perl_debug_log,
- "%p: push destructor at depth=%2d scope_ix=%2d save_ix=%2d\n",
- ud, depth, PL_scopestack_ix, PL_savestack_ix));
+ SU_D(su_debug_log(
+ "%p: push destructor at depth=%2d scope_ix=%2d save_ix=%2d\n",
+ ud, depth, PL_scopestack_ix, PL_savestack_ix));
SAVEDESTRUCTOR_X(su_pop, ud);
} else {
switch (SU_UD_TYPE(ud)) {
case SU_UD_TYPE_REAP: {
- SU_D({
- PerlIO_printf(Perl_debug_log,
- "%p: === reap\n%p: depth=%2d scope_ix=%2d save_ix=%2d\n",
- ud, ud, SU_UD_DEPTH(ud), PL_scopestack_ix, PL_savestack_ix);
- });
+ SU_D(su_debug_log("%p: === reap\n%p: depth=%2d scope_ix=%2d save_ix=%2d\n",
+ ud, ud, SU_UD_DEPTH(ud), PL_scopestack_ix, PL_savestack_ix));
SAVEDESTRUCTOR_X(su_call, SU_UD_REAP_CB(ud));
SU_UD_FREE(ud);
break;
SU_UD_LOCALIZE_FREE(ud);
break;
case SU_UD_TYPE_UID:
- SAVEDESTRUCTOR_X(su_uid_drop, SU_UD_UID_UID(ud));
- SU_UD_FREE(ud);
+ SAVEDESTRUCTOR_X(su_uid_drop, ud);
break;
}
}
- SU_D(PerlIO_printf(Perl_debug_log,
- "%p: --- end pop: cur_top=%2d == cur_base=%2d\n",
- ud, PL_savestack_ix, PL_scopestack[PL_scopestack_ix]));
+ SU_D(su_debug_log("%p: --- end pop: cur_top=%2d == cur_base=%2d\n",
+ ud, PL_savestack_ix, PL_scopestack[PL_scopestack_ix]));
}
/* --- Initialize the stack and the action userdata ------------------------ */
I32 i, depth, offset, base, *origin;
U8 pad;
- SU_D(PerlIO_printf(Perl_debug_log, "%p: ### init for cx %d\n", ud, cxix));
-
- if (size <= SU_SAVE_DESTRUCTOR_SIZE)
+ SU_D(su_debug_log("%p: ### init for cx %d\n", ud, cxix));
+
+ /* su_pop() is going to be called from leave_scope(), so before pushing the
+ * next callback, we'll want to flush the current scope stack slice first.
+ * However, if we want the next callback not to be processed immediately by
+ * the current leave_scope(), we'll need to hide it by artificially
+ * incrementing the scope stack marker before. For the intermediate bumps,
+ * we will only need a bump of SU_SAVE_DESTRUCTOR_SIZE items, but for the
+ * last one we will need a bump of size items. However, in order to preserve
+ * the natural ordering between scope stack markers, we cannot bump lower
+ * markers more than higher ones. This is why we bump the intermediate markers
+ * by the smallest multiple of SU_SAVE_PLACEHOLDER_SIZE greater or equal to
+ * max(SU_SAVE_DESTRUCTOR_SIZE, size). */
+
+ if (size <= SU_SAVE_DESTRUCTOR_SIZE) {
pad = 0;
- else {
+ } else {
I32 extra = size - SU_SAVE_DESTRUCTOR_SIZE;
pad = extra / SU_SAVE_PLACEHOLDER_SIZE;
if (extra % SU_SAVE_PLACEHOLDER_SIZE)
++pad;
}
offset = SU_SAVE_DESTRUCTOR_SIZE + SU_SAVE_PLACEHOLDER_SIZE * pad;
-
- SU_D(PerlIO_printf(Perl_debug_log, "%p: size=%d pad=%d offset=%d\n",
- ud, size, pad, offset));
+ SU_D(su_debug_log("%p: size=%d pad=%d offset=%d\n", ud, size, pad, offset));
depth = PL_scopestack_ix - cxstack[cxix].blk_oldscopesp;
- SU_D(PerlIO_printf(Perl_debug_log, "%p: going down to depth %d\n", ud, depth));
+ SU_D(su_debug_log("%p: going down to depth %d\n", ud, depth));
+
+ /* We need to bump all the intermediary stack markers just in case an
+ * exception is thrown before the target scope is reached. Indeed, in this
+ * case there might be arbitrary many scope frames flushed at the same time,
+ * and since we cannot know in advance whether this will happen or not, we
+ * have to make sure the final frame is protected for the actual action. But
+ * of course, in order to do that, we also need to bump all the previous stack
+ * markers. If not for this, it should have been possible to just bump the two
+ * next frames in su_pop(). */
Newx(origin, depth + 1, I32);
base = PL_scopestack_ix - depth;
PL_scopestack[base] += size;
for (i = 1; i < depth; ++i) {
I32 j = i + base;
+ /* origin[depth - i] == PL_scopestack[PL_scopestack_ix - i] */
origin[i] = PL_scopestack[j];
PL_scopestack[j] += offset;
}
<= PL_scopestack[PL_scopestack_ix - 1]) {
dMY_CXT;
do {
- SU_D(PerlIO_printf(Perl_debug_log,
- "%p: push a fake slot at scope_ix=%2d save_ix=%2d\n",
- ud, PL_scopestack_ix, PL_savestack_ix));
+ SU_D(su_debug_log("%p: push a fake slot at scope_ix=%2d save_ix=%2d\n",
+ ud, PL_scopestack_ix, PL_savestack_ix));
SU_SAVE_PLACEHOLDER();
} while (PL_savestack_ix + SU_SAVE_DESTRUCTOR_SIZE
<= PL_scopestack[PL_scopestack_ix - 1]);
}
- SU_D(PerlIO_printf(Perl_debug_log,
- "%p: push first destructor at scope_ix=%2d save_ix=%2d\n",
- ud, PL_scopestack_ix, PL_savestack_ix));
+ SU_D(su_debug_log("%p: push first destructor at scope_ix=%2d save_ix=%2d\n",
+ ud, PL_scopestack_ix, PL_savestack_ix));
SAVEDESTRUCTOR_X(su_pop, ud);
SU_D({
for (i = 0; i <= depth; ++i) {
I32 j = PL_scopestack_ix - i;
- PerlIO_printf(Perl_debug_log,
- "%p: depth=%2d scope_ix=%2d saved_floor=%2d new_floor=%2d\n",
- ud, i, j, origin[depth - i],
+ su_debug_log("%p: depth=%2d scope_ix=%2d saved_floor=%2d new_floor=%2d\n",
+ ud, i, j, origin[depth - i],
i == 0 ? PL_savestack_ix : PL_scopestack[j]);
}
});
SU_D({
I32 gimme = GIMME_V;
- PerlIO_printf(Perl_debug_log,
- "%p: cx=%d gimme=%s items=%d sp=%d oldmark=%d mark=%d\n",
+ su_debug_log("%p: cx=%d gimme=%s items=%d sp=%d oldmark=%d mark=%d\n",
&MY_CXT, cxix,
gimme == G_VOID ? "void" : gimme == G_ARRAY ? "list" : "scalar",
items, PL_stack_sp - PL_stack_base, *PL_markstack_ptr, mark);
sud->tmp_uid_storage = MY_CXT.uid_storage;
MY_CXT.uid_storage = sud->old_uid_storage;
{
- su_uid **map;
- UV i, alloc;
+ su_uid *map;
+ STRLEN i, alloc;
map = sud->tmp_uid_storage.map;
alloc = sud->tmp_uid_storage.alloc;
- for (i = 0; i < alloc; ++i) {
- if (map[i])
- map[i]->flags &= SU_UID_ACTIVE;
- }
+ for (i = 0; i < alloc; ++i)
+ map[i].flags &= ~SU_UID_ACTIVE;
}
MY_CXT.uplevel_storage.top = sud->next;
/* Return the UID slot for the given depth, growing the map as needed.
 * New slots are zero-initialized in place; the old lazy per-slot Newx and
 * NULL checks disappear with the flat-array layout.
 * NOTE(review): `alloc` is read below but its load (presumably
 * `alloc = MY_CXT.uid_storage.alloc;`) sits in a context line elided from
 * this hunk — confirm against the full file. */
static su_uid *su_uid_storage_fetch(pTHX_ UV depth) {
#define su_uid_storage_fetch(D) su_uid_storage_fetch(aTHX_ (D))
- su_uid **map, *uid;
- STRLEN alloc;
+ su_uid *map;
+ STRLEN alloc;
 dMY_CXT;
 map = MY_CXT.uid_storage.map;
 if (depth >= alloc) {
 STRLEN i;
/* Grow to exactly depth+1 slots and reset every newly exposed slot. */
- Renew(map, depth + 1, su_uid *);
- for (i = alloc; i <= depth; ++i)
- map[i] = NULL;
+ Renew(map, depth + 1, su_uid);
+ for (i = alloc; i <= depth; ++i) {
+ map[i].seq = 0;
+ map[i].flags = 0;
+ }
 MY_CXT.uid_storage.map = map;
 MY_CXT.uid_storage.alloc = depth + 1;
 }
/* Lazy allocation of the slot is gone: the slot always exists now. */
- uid = map[depth];
-
- if (!uid) {
- Newx(uid, 1, su_uid);
- uid->seq = 0;
- uid->flags = 0;
- map[depth] = uid;
- }
-
 if (depth >= MY_CXT.uid_storage.used)
 MY_CXT.uid_storage.used = depth + 1;
- return uid;
+ return map + depth;
}
/* True iff the UID at `depth` is active and its sequence number matches
 * `seq`. The NULL test on the slot is dropped: in the flat-array layout a
 * slot within `used` always exists (inactive slots have flags == 0).
 * NOTE(review): the declarations of `uid` and dMY_CXT are in context lines
 * elided from this hunk. */
static int su_uid_storage_check(pTHX_ UV depth, UV seq) {
 if (depth >= MY_CXT.uid_storage.used)
 return 0;
- uid = MY_CXT.uid_storage.map[depth];
+ uid = MY_CXT.uid_storage.map + depth;
- return uid && (uid->seq == seq) && (uid->flags & SU_UID_ACTIVE);
+ return (uid->seq == seq) && (uid->flags & SU_UID_ACTIVE);
}
/* Build the mortal "depth-seq" string identifying the context at cxix,
 * arming a su_uid_drop destructor for it when needed. The destructor
 * userdata now records the map index (ud->idx = depth) instead of a raw
 * su_uid pointer, so a later Renew of the map cannot dangle it.
 * NOTE(review): the branch opener (presumably an activity check on
 * uid->flags) and ud's declaration are in hunk-elided context lines —
 * hence the unmatched-looking `}` below. */
static SV *su_uid_get(pTHX_ I32 cxix) {
#define su_uid_get(I) su_uid_get(aTHX_ (I))
 su_uid *uid;
- SV *uid_sv;
- UV depth;
+ SV *uid_sv;
+ UV depth;
 depth = su_uid_depth(cxix);
 uid = su_uid_storage_fetch(depth);
 Newx(ud, 1, su_ud_uid);
 SU_UD_TYPE(ud) = SU_UD_TYPE_UID;
- ud->uid = uid;
+ ud->idx = depth;
 su_init(ud, cxix, SU_SAVE_DESTRUCTOR_SIZE);
 }
 uid_sv = sv_newmortal();
 sv_setpvf(uid_sv, "%"UVuf"-%"UVuf, depth, uid->seq);
+
 return uid_sv;
}
static void su_local_teardown(pTHX_ void *param) {
su_uplevel_ud *cur;
- su_uid **map;
dMY_CXT;
- map = MY_CXT.uid_storage.map;
- if (map) {
- STRLEN i;
- for (i = 0; i < MY_CXT.uid_storage.used; ++i)
- Safefree(map[i]);
- Safefree(map);
- }
+ Safefree(MY_CXT.uid_storage.map);
cur = MY_CXT.uplevel_storage.root;
if (cur) {
--cxix;
cxix = su_context_skip_db(cxix);
cxix = su_context_normalize_up(cxix);
+ } else {
+ warn(su_stack_smash);
}
EXTEND(SP, 1);
mPUSHi(cxix);
XSRETURN(1);
}
}
+ warn(su_no_such_target, "subroutine");
XSRETURN_UNDEF;
void
XSRETURN(1);
}
}
+ warn(su_no_such_target, "eval");
XSRETURN_UNDEF;
void
SU_GET_LEVEL(0, 0);
cxix = su_context_here();
while (--level >= 0) {
- if (cxix <= 0)
+ if (cxix <= 0) {
+ warn(su_stack_smash);
break;
+ }
--cxix;
cxix = su_context_skip_db(cxix);
cxix = su_context_normalize_up(cxix);
}
}
done:
+ if (level >= 0)
+ warn(su_stack_smash);
EXTEND(SP, 1);
mPUSHi(cxix);
XSRETURN(1);