#define PERL_NO_GET_CONTEXT
#include "EXTERN.h"
-#include "perl.h"
+#include "perl.h"
#include "XSUB.h"
#define __PACKAGE__ "Scope::Upper"
# define MY_CXT_CLONE NOOP
#endif
+/* --- Unique context ID global storage ------------------------------------ */
+
+/* ... Sequence ID counter ................................................. */
+
+/* Growable array of per-depth sequence counters.  Lives in shared memory
+ * (PerlMemShared_*) so that all ithreads hand out globally unique ids. */
+typedef struct {
+ UV *seqs;
+ STRLEN size;
+} su_uv_array;
+
+STATIC su_uv_array su_uid_seq_counter;
+
+#ifdef USE_ITHREADS
+
+STATIC perl_mutex su_uid_seq_counter_mutex;
+
+#define SU_LOCK(M) MUTEX_LOCK(M)
+#define SU_UNLOCK(M) MUTEX_UNLOCK(M)
+
+#else /* USE_ITHREADS */
+
+#define SU_LOCK(M)
+#define SU_UNLOCK(M)
+
+#endif /* !USE_ITHREADS */
+
+/* Return the next sequence number for context depth "depth", growing the
+ * shared counter array on demand.  The whole operation is serialized by
+ * su_uid_seq_counter_mutex under ithreads (SU_LOCK is a no-op otherwise). */
+STATIC UV su_uid_seq_next(pTHX_ UV depth) {
+#define su_uid_seq_next(D) su_uid_seq_next(aTHX_ (D))
+ UV seq;
+ UV *seqs;
+
+ SU_LOCK(&su_uid_seq_counter_mutex);
+
+ seqs = su_uid_seq_counter.seqs;
+
+ if (depth >= su_uid_seq_counter.size) {
+ UV i;
+
+ /* Grow to depth+1 slots; fresh slots start counting from 0.
+  * NOTE(review): realloc result is not checked for NULL — presumably OOM
+  * aborts elsewhere; confirm against the rest of the module. */
+ seqs = PerlMemShared_realloc(seqs, (depth + 1) * sizeof(UV));
+ for (i = su_uid_seq_counter.size; i <= depth; ++i)
+ seqs[i] = 0;
+
+ su_uid_seq_counter.seqs = seqs;
+ su_uid_seq_counter.size = depth + 1;
+ }
+
+ seq = ++seqs[depth];
+
+ SU_UNLOCK(&su_uid_seq_counter_mutex);
+
+ return seq;
+}
+
+/* ... UID storage ......................................................... */
+
+/* One unique-context-id slot: the sequence number handed out for a given
+ * stack depth, plus status flags. */
+typedef struct {
+ UV seq;
+ U32 flags;
+} su_uid;
+
+/* Set while the context this uid was minted for is still alive. */
+#define SU_UID_ACTIVE 1
+
+/* Absolute depth of context "cxix": its index on the current stackinfo plus
+ * the number of contexts on every previous stackinfo in the chain. */
+STATIC UV su_uid_depth(pTHX_ I32 cxix) {
+#define su_uid_depth(I) su_uid_depth(aTHX_ (I))
+ const PERL_SI *si;
+ UV depth;
+
+ depth = cxix;
+ for (si = PL_curstackinfo->si_prev; si; si = si->si_prev)
+  depth += si->si_cxix + 1;
+
+ return depth;
+}
+
+/* Per-interpreter uid map, indexed by absolute context depth.
+ * "used" is the high-water mark of depths seen, "alloc" the slot count. */
+typedef struct {
+ su_uid **map;
+ STRLEN used;
+ STRLEN alloc;
+} su_uid_storage;
+
+/* Copy the active uids of old_cxt into new_cxt, up to max_depth entries.
+ * Slots of new_cxt beyond the copied range, or whose source uid is absent
+ * or inactive, are left/marked inactive.  Used on thread CLONE and when
+ * entering uplevel(). */
+STATIC void su_uid_storage_dup(pTHX_ su_uid_storage *new_cxt, const su_uid_storage *old_cxt, UV max_depth) {
+#define su_uid_storage_dup(N, O, D) su_uid_storage_dup(aTHX_ (N), (O), (D))
+ su_uid **old_map = old_cxt->map;
+
+ if (old_map) {
+ su_uid **new_map = new_cxt->map;
+ STRLEN old_used = old_cxt->used;
+ STRLEN old_alloc = old_cxt->alloc; /* NOTE(review): unused local */
+ STRLEN new_used, new_alloc;
+ STRLEN i;
+
+ new_used = max_depth < old_used ? max_depth : old_used;
+ new_cxt->used = new_used;
+
+ if (new_used <= new_cxt->alloc)
+ new_alloc = new_cxt->alloc;
+ else {
+ /* Grow the destination map; new slots start out NULL. */
+ new_alloc = new_used;
+ Renew(new_map, new_alloc, su_uid *);
+ for (i = new_cxt->alloc; i < new_alloc; ++i)
+ new_map[i] = NULL;
+ new_cxt->map = new_map;
+ new_cxt->alloc = new_alloc;
+ }
+
+ for (i = 0; i < new_alloc; ++i) {
+ su_uid *new_uid = new_map[i];
+
+ if (i < new_used) { /* => i < max_depth && i < old_used */
+ su_uid *old_uid = old_map[i];
+
+ if (old_uid && (old_uid->flags & SU_UID_ACTIVE)) {
+ /* Active source uid: deep-copy it, allocating the slot on demand. */
+ if (!new_uid) {
+ Newx(new_uid, 1, su_uid);
+ new_map[i] = new_uid;
+ }
+ *new_uid = *old_uid;
+ continue;
+ }
+ }
+
+ /* Not copied: make sure any pre-existing destination uid is inactive. */
+ if (new_uid)
+ new_uid->flags &= ~SU_UID_ACTIVE;
+ }
+ }
+
+ return;
+}
+
/* --- unwind() global storage --------------------------------------------- */
typedef struct {
/* --- uplevel() data tokens and global storage ---------------------------- */
+#define SU_UPLEVEL_HIJACKS_RUNOPS SU_HAS_PERL(5, 8, 0)
+
typedef struct {
void *next;
CV *callback;
CV *renamed;
- AV *args;
PERL_SI *si;
PERL_SI *old_curstackinfo;
COP *old_curcop;
- bool old_catch;
- OP *old_op;
+#if SU_UPLEVEL_HIJACKS_RUNOPS
+ runops_proc_t old_runops;
+#endif
+ bool old_catch;
+ OP *old_op;
- OP *goto_op;
- CV *goto_code;
- U32 goto_perldb;
+ su_uid_storage new_uid_storage, old_uid_storage;
} su_uplevel_ud;
STATIC su_uplevel_ud *su_uplevel_ud_new(pTHX) {
Newx(sud, 1, su_uplevel_ud);
sud->next = NULL;
+ sud->new_uid_storage.map = NULL;
+ sud->new_uid_storage.used = 0;
+ sud->new_uid_storage.alloc = 0;
+
Newx(si, 1, PERL_SI);
si->si_stack = newAV();
AvREAL_off(si->si_stack);
Safefree(si->si_cxstack);
SvREFCNT_dec(si->si_stack);
Safefree(si);
+
+ if (sud->new_uid_storage.map) {
+ su_uid **map = sud->new_uid_storage.map;
+ STRLEN alloc = sud->new_uid_storage.alloc;
+ STRLEN i;
+
+ for (i = 0; i < alloc; ++i)
+ Safefree(map[i]);
+
+ Safefree(map);
+ }
+
Safefree(sud);
return;
}
typedef struct {
+ su_uplevel_ud *top;
su_uplevel_ud *root;
I32 count;
} su_uplevel_storage;
char *stack_placeholder;
su_unwind_storage unwind_storage;
su_uplevel_storage uplevel_storage;
+ su_uid_storage uid_storage;
} my_cxt_t;
START_MY_CXT
#define SU_UPLEVEL_SAVE(f, t) STMT_START { sud->old_##f = PL_##f; PL_##f = (t); } STMT_END
#define SU_UPLEVEL_RESTORE(f) STMT_START { PL_##f = sud->old_##f; } STMT_END
-STATIC su_uplevel_ud *su_uplevel_storage_new(pTHX) {
-#define su_uplevel_storage_new() su_uplevel_storage_new(aTHX)
+STATIC su_uplevel_ud *su_uplevel_storage_new(pTHX_ I32 cxix) {
+#define su_uplevel_storage_new(I) su_uplevel_storage_new(aTHX_ (I))
su_uplevel_ud *sud;
+ UV depth;
dMY_CXT;
sud = MY_CXT.uplevel_storage.root;
sud = su_uplevel_ud_new();
}
+ sud->next = MY_CXT.uplevel_storage.top;
+ MY_CXT.uplevel_storage.top = sud;
+
+ depth = su_uid_depth(cxix);
+ su_uid_storage_dup(&sud->new_uid_storage, &MY_CXT.uid_storage, depth);
+ sud->old_uid_storage = MY_CXT.uid_storage;
+ MY_CXT.uid_storage = sud->new_uid_storage;
+
return sud;
}
#define su_uplevel_storage_delete(S) su_uplevel_storage_delete(aTHX_ (S))
dMY_CXT;
+ sud->new_uid_storage = MY_CXT.uid_storage;
+ MY_CXT.uid_storage = sud->old_uid_storage;
+ {
+ su_uid **map;
+ UV i, alloc;
+ map = sud->new_uid_storage.map;
+ alloc = sud->new_uid_storage.alloc;
+ /* Deactivate every uid minted inside the uplevel'd frame: clear the
+  * ACTIVE bit.  (Was "&= SU_UID_ACTIVE", which instead kept only that
+  * bit set — the opposite of the intent; compare su_uid_drop().) */
+ for (i = 0; i < alloc; ++i) {
+ if (map[i])
+ map[i]->flags &= ~SU_UID_ACTIVE;
+ }
+ }
+ MY_CXT.uplevel_storage.top = sud->next;
+
if (MY_CXT.uplevel_storage.count >= SU_UPLEVEL_STORAGE_SIZE) {
su_uplevel_ud_delete(sud);
} else {
}
}
-#define SU_HAS_EXT_MAGIC SU_HAS_PERL(5, 8, 0)
-
-#if SU_HAS_EXT_MAGIC && !SU_HAS_PERL(5, 13, 7)
+/* Recursively scan an optree and return 1 iff it contains an OP_GOTO that
+ * could run outside an eval (gotos inside eval/try are skipped since perl
+ * forbids them at run time anyway). */
+STATIC int su_uplevel_goto_static(const OP *o) {
+ for (; o; o = o->op_sibling) {
+ /* goto ops are unops with kids. */
+ if (!(o->op_flags & OPf_KIDS))
+ continue;
-STATIC int su_uplevel_restore_free(pTHX_ SV *sv, MAGIC *mg) {
- su_uplevel_storage_delete((su_uplevel_ud *) mg->mg_ptr);
+ switch (o->op_type) {
+ case OP_LEAVEEVAL:
+ case OP_LEAVETRY:
+ /* Don't care about gotos inside eval, as they are forbidden at run time. */
+ break;
+ case OP_GOTO:
+ return 1;
+ default:
+ if (su_uplevel_goto_static(cUNOPo->op_first))
+ return 1;
+ break;
+ }
+ }
 return 0;
}
-STATIC MGVTBL su_uplevel_restore_vtbl = {
- 0,
- 0,
- 0,
- 0,
- su_uplevel_restore_free
-};
+#if SU_UPLEVEL_HIJACKS_RUNOPS
+
+/* Replacement runloop installed by su_uplevel() when the callback's optree
+ * contains a goto.  On each OP_GOTO it locates the closest enclosing sub
+ * context (stopping at eval/format frames) and, when that frame is the
+ * uplevel'd one, copies the current @_ into the frame's argarray —
+ * presumably so "goto &sub" sees the right arguments; see su_uplevel(). */
+STATIC int su_uplevel_goto_runops(pTHX) {
+#define su_uplevel_goto_runops() su_uplevel_goto_runops(aTHX)
+ register OP *op;
+ dVAR;
-#endif /* SU_HAS_EXT_MAGIC && !SU_HAS_PERL(5, 13, 7) */
+ op = PL_op;
+ do {
+ if (op->op_type == OP_GOTO) {
+ AV *argarray = NULL;
+ I32 cxix;
+
+ /* Walk the context stack down to the nearest sub with arguments. */
+ for (cxix = cxstack_ix; cxix >= 0; --cxix) {
+ const PERL_CONTEXT *cx = cxstack + cxix;
+
+ switch (CxTYPE(cx)) {
+ case CXt_SUB:
+ if (CxHASARGS(cx)) {
+ argarray = cx->blk_sub.argarray;
+ goto done;
+ }
+ break;
+ case CXt_EVAL:
+ case CXt_FORMAT:
+ goto done;
+ default:
+ break;
+ }
+ }
+
+done:
+ if (argarray) {
+ dMY_CXT;
+
+ /* Only fix up @_ when the goto happens in the uplevel'd frame itself. */
+ if (MY_CXT.uplevel_storage.top->cxix == cxix) {
+ AV *args = GvAV(PL_defgv);
+ I32 items = AvFILLp(args);
+
+ av_extend(argarray, items);
+ Copy(AvARRAY(args), AvARRAY(argarray), items + 1, SV *);
+ AvFILLp(argarray) = items;
+ }
+ }
+ }
+
+ PL_op = op = op->op_ppaddr(aTHX);
+
+#if !SU_HAS_PERL(5, 13, 0)
+ PERL_ASYNC_CHECK();
+#endif
+ } while (op);
+
+ TAINT_NOT;
+
+ return 0;
+}
+
+#endif /* SU_UPLEVEL_HIJACKS_RUNOPS */
#define su_at_underscore(C) AvARRAY(AvARRAY(CvPADLIST(C))[CvDEPTH(C)])[0]
PERL_SI *cur = sud->old_curstackinfo;
PERL_SI *si = sud->si;
+#if SU_UPLEVEL_HIJACKS_RUNOPS
+ if (PL_runops == su_uplevel_goto_runops)
+ PL_runops = sud->old_runops;
+#endif
+
if (sud->callback) {
PERL_CONTEXT *cx = cxstack + sud->cxix;
AV *argarray = MUTABLE_AV(su_at_underscore(sud->callback));
/* This issue has been fixed in perl with commit 8f89e5a9, which was made
* public in perl 5.13.7. */
su_uplevel_storage_delete(sud);
-#elif SU_HAS_EXT_MAGIC
- /* If 'ext' magic is available, we work around this by attaching the state
- * data to a scalar that will be freed "soon". */
- {
- SV *sv = sv_newmortal();
-
- sv_magicext(sv, NULL, PERL_MAGIC_ext, &su_uplevel_restore_vtbl,
- (const char *) sud, 0);
- }
#else
/* Otherwise, we just enqueue it back in the global storage list. */
{
dMY_CXT;
+ sud->new_uid_storage = MY_CXT.uid_storage;
+ MY_CXT.uid_storage = sud->old_uid_storage;
+
+ MY_CXT.uplevel_storage.top = sud->next;
sud->next = MY_CXT.uplevel_storage.root;
MY_CXT.uplevel_storage.root = sud;
MY_CXT.uplevel_storage.count++;
Perl_sv_add_backref(aTHX_ CvSTASH(proto), MUTABLE_SV(cv));
#endif
- OP_REFCNT_LOCK;
- CvROOT(cv) = OpREFCNT_inc(CvROOT(proto));
- OP_REFCNT_UNLOCK;
- CvSTART(cv) = CvSTART(proto);
+ if (CvISXSUB(proto)) {
+ CvXSUB(cv) = CvXSUB(proto);
+ CvXSUBANY(cv) = CvXSUBANY(proto);
+ } else {
+ OP_REFCNT_LOCK;
+ CvROOT(cv) = OpREFCNT_inc(CvROOT(proto));
+ OP_REFCNT_UNLOCK;
+ CvSTART(cv) = CvSTART(proto);
+ }
CvOUTSIDE(cv) = CvOUTSIDE(proto);
#ifdef CVf_WEAKOUTSIDE
if (!(CvFLAGS(proto) & CVf_WEAKOUTSIDE))
return cv;
}
-#if SU_HAS_PERL(5, 8, 0)
-
-STATIC int su_uplevel_guard_free(pTHX_ SV *sv, MAGIC *mg) {
- MAGIC *omg = (MAGIC *) mg->mg_ptr;
- su_uplevel_ud *sud = (su_uplevel_ud *) omg->mg_ptr;
- AV *args;
-
- /* This code should be triggered by the FREETMPS in the first
- * nextstate/dbstate op of the goto'd code. Its job is to reset the sub
- * arguments to what the uplevel'd code was called with. */
-
- if (PL_op != CvSTART(sud->goto_code))
- croak("su_uplevel_guard_free() was called at an incorrect time");
- sud->goto_code = NULL;
-
- /* get_db_sub() has called save_item() on the SV member of the fake GV we
- * used to replace PL_DBsub, so we can't kill it yet. Since set magic will
- * be called when the item is restored, we save the fake GV so that we can
- * correctly drop its refcount just after the restore. */
- omg->mg_obj = MUTABLE_SV(PL_DBsub);
- PL_DBsub = NULL;
-
- args = sud->args;
- if (args) {
- PERL_CONTEXT *cx;
- I32 items = AvFILLp(args);
- AV *argarray;
- dSP;
-
- EXTEND(SP, items + 2);
- Copy(AvARRAY(args), SP + 1, items + 1, SV *);
-
- cx = cxstack + cxstack_ix;
- argarray = cx->blk_sub.argarray;
- av_extend(argarray, items);
- Copy(AvARRAY(args), AvARRAY(argarray), items + 1, SV *);
- AvFILLp(argarray) = items;
- }
-
- return 0;
-}
-
-STATIC MGVTBL su_uplevel_guard_vtbl = {
- 0,
- 0,
- 0,
- 0,
- su_uplevel_guard_free
-};
-
-STATIC int su_uplevel_dbsv_get(pTHX_ SV *sv, MAGIC *mg) {
- su_uplevel_ud *sud = (su_uplevel_ud *) mg->mg_ptr;
- SV *guard;
-
- /* This code should be called at the very end of pp_goto, after the
- * SAVETMPS enclosing the sub was isseud and the blk_sub.cv member is set.
- * It creates a magical mortal guard that will be destroyed soon at the next
- * FREETMPS. */
-
- if (PL_op != sud->goto_op)
- croak("su_uplevel_dbsv_get() was called at an incorrect time");
- sud->goto_op = NULL;
-
- sud->goto_code = cxstack[cxstack_ix].blk_sub.cv;
- PL_perldb = sud->goto_perldb;
-
- guard = sv_newmortal();
- sv_magicext(guard, 0, PERL_MAGIC_ext, &su_uplevel_guard_vtbl,
- (const char *) mg, 0);
-
- return 0;
-}
-
-STATIC int su_uplevel_dbsv_set(pTHX_ SV *sv, MAGIC *mg) {
- su_uplevel_ud *sud = (su_uplevel_ud *) mg->mg_ptr;
- SV *guard;
-
- /* This handler is supposed to be executed when the saved GvSV(PL_DBsub)
- * is restored, which happens when the goto'd code terminates. Its aim is
- * just to clean up after our hack. */
-
- if (sud->goto_op)
- croak("su_uplevel_dbsv_set() called before su_uplevel_dbsv_get");
- if (sud->goto_code)
- croak("su_uplevel_dbsv_set() called before su_uplevel_goto_2_free");
-
- /* Don't free the current magical SV right now, because the mg_*() calls above
- * us may still need it. */
- sv_2mortal(sv);
- SvREFCNT_dec(mg->mg_obj);
-
- return 0;
-}
-
-STATIC MGVTBL su_uplevel_dbsv_vtbl = {
- su_uplevel_dbsv_get,
- su_uplevel_dbsv_set,
- 0,
- 0,
- 0
-};
-
-#ifndef GvSVn
-# ifdef PERL_DONT_CREATE_GVSV
-# define GvSVn(gv) (*(GvGP(gv)->gp_sv ? \
- &(GvGP(gv)->gp_sv) : \
- &(GvGP(gv_SVadd(gv))->gp_sv)))
-# else
-# define GvSVn(gv) GvSV(gv)
-# endif
-#endif
-
-STATIC void su_uplevel_goto_handler(pTHX_ void *ud_) {
- su_uplevel_ud *sud = ud_;
-
- if (PL_op && PL_op->op_type == OP_GOTO && !PL_DBsub) {
- SV *dbsv;
-
- sud->goto_op = PL_op;
- sud->goto_code = NULL;
- sud->goto_perldb = PL_perldb;
-
- PL_DBsub = (GV *) newSV(0);
- gv_init(PL_DBsub, NULL, "", 0, 0);
- PL_perldb = PERLDBf_SUB;
-
- dbsv = GvSVn(PL_DBsub);
- sv_magicext(dbsv, NULL, PERL_MAGIC_ext, &su_uplevel_dbsv_vtbl,
- (const char *) sud, 0);
- SvREFCNT_inc(dbsv);
- }
-}
-
-#else /* SU_HAS_PERL(5, 8, 0) */
-
-STATIC void su_uplevel_goto_handler(pTHX_ void *ud_) {
- su_uplevel_ud *sud = ud_;
-
- if (PL_op && PL_op->op_type == OP_GOTO) {
- /* Don't let the last sub context in an mixed state while we throw an
- * exception, as this may cause double free errors (the blk_sub.cv member
- * is still the renamed CV). Let our su_uplevel_restore() properly handle the
- * destruction. */
- cxstack[cxstack_ix].blk_sub.cv = NULL;
- croak("Can't goto to an uplevel'd stack frame on perl 5.6");
- }
-}
-
-#endif /* !SU_HAS_PERL(5, 8, 0) */
-
STATIC I32 su_uplevel(pTHX_ CV *callback, I32 cxix, I32 args) {
#define su_uplevel(C, I, A) su_uplevel(aTHX_ (C), (I), (A))
su_uplevel_ud *sud;
old_mark = AvFILLp(PL_curstack) = PL_stack_sp - PL_stack_base;
SPAGAIN;
- sud = su_uplevel_storage_new();
+ sud = su_uplevel_storage_new(cxix);
sud->cxix = cxix;
sud->died = 1;
sud->callback = NULL;
sud->renamed = NULL;
- sud->args = NULL;
SAVEDESTRUCTOR_X(su_uplevel_restore, sud);
si = sud->si;
SU_UPLEVEL_SAVE(op, (OP *) &sub_op);
+#if SU_UPLEVEL_HIJACKS_RUNOPS
+ sud->old_runops = PL_runops;
+#endif
+
sud->old_catch = CATCH_GET;
CATCH_SET(TRUE);
if ((PL_op = PL_ppaddr[OP_ENTERSUB](aTHX))) {
PERL_CONTEXT *sub_cx = cxstack + cxstack_ix;
+ /* If pp_entersub() returns a non-null OP, it means that the callback is not
+ * an XSUB. */
+
sud->callback = MUTABLE_CV(SvREFCNT_inc(callback));
CvDEPTH(callback)++;
} else {
SvREFCNT_inc_simple_void(sub_cx->blk_sub.argarray);
}
- sud->args = GvAV(PL_defgv);
- SAVEDESTRUCTOR_X(su_uplevel_goto_handler, sud);
+ if (su_uplevel_goto_static(CvROOT(renamed))) {
+#if SU_UPLEVEL_HIJACKS_RUNOPS
+ if (PL_runops != PL_runops_std) {
+ if (PL_runops == PL_runops_dbg) {
+ if (PL_debug)
+ croak("uplevel() can't execute code that calls goto when debugging flags are set");
+ } else if (PL_runops != su_uplevel_goto_runops)
+ croak("uplevel() can't execute code that calls goto with a custom runloop");
+ }
- CALLRUNOPS(aTHX);
+ PL_runops = su_uplevel_goto_runops;
+#else /* SU_UPLEVEL_HIJACKS_RUNOPS */
+ croak("uplevel() can't execute code that calls goto before perl 5.8");
+#endif /* !SU_UPLEVEL_HIJACKS_RUNOPS */
+ }
- ret = PL_stack_sp - (PL_stack_base + new_mark);
+ CALLRUNOPS(aTHX);
}
sud->died = 0;
- SPAGAIN;
-
+ ret = PL_stack_sp - (PL_stack_base + new_mark);
if (ret > 0) {
AV *old_stack = sud->old_curstackinfo->si_stack;
AvFILLp(old_stack) += ret;
}
- PUTBACK;
-
LEAVE;
return ret;
}
+/* --- Unique context ID --------------------------------------------------- */
+
+/* Return the uid slot for absolute depth "depth" in this interpreter's map,
+ * growing the map and allocating the slot on demand, and bumping the "used"
+ * high-water mark. */
+STATIC su_uid *su_uid_storage_fetch(pTHX_ UV depth) {
+#define su_uid_storage_fetch(D) su_uid_storage_fetch(aTHX_ (D))
+ su_uid **map, *uid;
+ STRLEN alloc;
+ dMY_CXT;
+
+ map = MY_CXT.uid_storage.map;
+ alloc = MY_CXT.uid_storage.alloc;
+
+ if (depth >= alloc) {
+ STRLEN i;
+
+ /* Grow to depth+1 slots; new slots start out NULL (lazily allocated). */
+ Renew(map, depth + 1, su_uid *);
+ for (i = alloc; i <= depth; ++i)
+ map[i] = NULL;
+
+ MY_CXT.uid_storage.map = map;
+ MY_CXT.uid_storage.alloc = depth + 1;
+ }
+
+ uid = map[depth];
+
+ if (!uid) {
+ /* First time this depth is asked for: allocate an inactive slot. */
+ Newx(uid, 1, su_uid);
+ uid->seq = 0;
+ uid->flags = 0;
+ map[depth] = uid;
+ }
+
+ if (depth >= MY_CXT.uid_storage.used)
+ MY_CXT.uid_storage.used = depth + 1;
+
+ return uid;
+}
+
+/* True iff a uid with this (depth, seq) pair exists and is still active,
+ * i.e. the context it identifies has not been left yet. */
+STATIC int su_uid_storage_check(pTHX_ UV depth, UV seq) {
+#define su_uid_storage_check(D, S) su_uid_storage_check(aTHX_ (D), (S))
+ su_uid *uid;
+ dMY_CXT;
+
+ if (depth >= MY_CXT.uid_storage.used)
+ return 0;
+
+ uid = MY_CXT.uid_storage.map[depth];
+
+ return uid && (uid->seq == seq) && (uid->flags & SU_UID_ACTIVE);
+}
+
+/* Destructor: mark a uid inactive once the context it was minted for ends. */
+STATIC void su_uid_drop(pTHX_ void *ud_) {
+ su_uid *uid = ud_;
+
+ uid->flags &= ~SU_UID_ACTIVE;
+}
+
+/* Handler run in the target scope: re-arm su_uid_drop() on the savestack so
+ * the uid (stashed in ud->cb — su_ud_reap is declared elsewhere in this
+ * file) is deactivated when that scope is left. */
+STATIC void su_uid_bump(pTHX_ void *ud_) {
+ su_ud_reap *ud = ud_;
+
+ SAVEDESTRUCTOR_X(su_uid_drop, ud->cb);
+}
+
+/* Return a mortal SV holding the unique id ("depth-seq") of context cxix.
+ * On first request for a live context, mint a fresh sequence number, mark
+ * the uid active, and schedule su_uid_bump via su_init() (defined elsewhere
+ * in this file) so the uid is dropped when the context exits. */
+STATIC SV *su_uid_get(pTHX_ I32 cxix) {
+#define su_uid_get(I) su_uid_get(aTHX_ (I))
+ su_uid *uid;
+ SV *uid_sv;
+ UV depth;
+
+ depth = su_uid_depth(cxix);
+ uid = su_uid_storage_fetch(depth);
+
+ if (!(uid->flags & SU_UID_ACTIVE)) {
+ su_ud_reap *ud;
+
+ uid->seq = su_uid_seq_next(depth);
+ uid->flags |= SU_UID_ACTIVE;
+
+ /* cb abuses the SV* slot to carry the su_uid pointer; see su_uid_bump. */
+ Newx(ud, 1, su_ud_reap);
+ SU_UD_ORIGIN(ud) = NULL;
+ SU_UD_HANDLER(ud) = su_uid_bump;
+ ud->cb = (SV *) uid;
+ su_init(ud, cxix, SU_SAVE_DESTRUCTOR_SIZE);
+ }
+
+ uid_sv = sv_newmortal();
+ sv_setpvf(uid_sv, "%"UVuf"-%"UVuf, depth, uid->seq);
+ return uid_sv;
+}
+
+#ifdef grok_number
+
+#define su_grok_number(S, L, VP) grok_number((S), (L), (VP))
+
+#else /* grok_number */
+
+#define IS_NUMBER_IN_UV 0x1
+
+/* Fallback for perls lacking grok_number(): accept only all-digit strings
+ * and convert via sv_2uv().  Returns IS_NUMBER_IN_UV on success, 0 otherwise. */
+STATIC int su_grok_number(pTHX_ const char *s, STRLEN len, UV *valuep) {
+#define su_grok_number(S, L, VP) su_grok_number(aTHX_ (S), (L), (VP))
+ STRLEN i;
+ SV *tmpsv;
+
+ /* This crude check should be good enough for a fallback implementation.
+ * Better be too strict than too lax. */
+ for (i = 0; i < len; ++i) {
+ if (!isDIGIT(s[i]))
+ return 0;
+ }
+
+ tmpsv = sv_newmortal();
+ sv_setpvn(tmpsv, s, len);
+ *valuep = sv_2uv(tmpsv);
+
+ return IS_NUMBER_IN_UV;
+}
+
+#endif /* !grok_number */
+
+/* Parse a "depth-seq" uid string and return whether it still names a live
+ * context (see su_uid_storage_check).  Croaks on malformed input. */
+STATIC int su_uid_validate(pTHX_ SV *uid) {
+#define su_uid_validate(U) su_uid_validate(aTHX_ (U))
+ const char *s;
+ STRLEN len, p = 0;
+ UV depth, seq;
+ int type;
+
+ s = SvPV_const(uid, len);
+
+ /* Find the '-' separating the two numeric parts. */
+ while (p < len && s[p] != '-')
+ ++p;
+ if (p >= len)
+ croak("UID contains only one part");
+
+ type = su_grok_number(s, p, &depth);
+ if (type != IS_NUMBER_IN_UV)
+ croak("First UID part is not an unsigned integer");
+
+ ++p; /* Skip '-'. As we used to have p < len, len - (p + 1) >= 0. */
+
+ type = su_grok_number(s + p, len - p, &seq);
+ if (type != IS_NUMBER_IN_UV)
+ croak("Second UID part is not an unsigned integer");
+
+ return su_uid_storage_check(depth, seq);
+}
+
/* --- Interpreter setup/teardown ------------------------------------------ */
STATIC void su_teardown(pTHX_ void *param) {
su_uplevel_ud *cur;
+ su_uid **map;
dMY_CXT;
+ map = MY_CXT.uid_storage.map;
+ if (map) {
+ STRLEN i;
+ for (i = 0; i < MY_CXT.uid_storage.used; ++i)
+ Safefree(map[i]);
+ Safefree(map);
+ }
+
cur = MY_CXT.uplevel_storage.root;
if (cur) {
su_uplevel_ud *prev;
MY_CXT.unwind_storage.proxy_op.op_type = OP_STUB;
MY_CXT.unwind_storage.proxy_op.op_ppaddr = NULL;
+ MY_CXT.uplevel_storage.top = NULL;
MY_CXT.uplevel_storage.root = NULL;
MY_CXT.uplevel_storage.count = 0;
+ MY_CXT.uid_storage.map = NULL;
+ MY_CXT.uid_storage.used = 0;
+ MY_CXT.uid_storage.alloc = 0;
+
call_atexit(su_teardown, NULL);
return;
{
HV *stash;
+ MUTEX_INIT(&su_uid_seq_counter_mutex);
+
+ su_uid_seq_counter.seqs = NULL;
+ su_uid_seq_counter.size = 0;
+
stash = gv_stashpv(__PACKAGE__, 1);
newCONSTSUB(stash, "TOP", newSViv(0));
newCONSTSUB(stash, "SU_THREADSAFE", newSVuv(SU_THREADSAFE));
void
CLONE(...)
PROTOTYPE: DISABLE
+PREINIT:
+ su_uid_storage new_cxt;
PPCODE:
+ {
+ dMY_CXT;
+ new_cxt.map = NULL;
+ new_cxt.used = 0;
+ new_cxt.alloc = 0;
+ su_uid_storage_dup(&new_cxt, &MY_CXT.uid_storage, MY_CXT.uid_storage.used);
+ }
{
MY_CXT_CLONE;
+ MY_CXT.uplevel_storage.top = NULL;
MY_CXT.uplevel_storage.root = NULL;
MY_CXT.uplevel_storage.count = 0;
+ MY_CXT.uid_storage = new_cxt;
}
XSRETURN(0);
}
} while (--cxix >= 0);
croak("Can't uplevel outside a subroutine");
+
+void
+uid(...)
+PROTOTYPE: ;$
+PREINIT:
+ I32 cxix;
+ SV *uid;
+PPCODE:
+ SU_GET_CONTEXT(0, 0);
+ uid = su_uid_get(cxix);
+ EXTEND(SP, 1);
+ PUSHs(uid);
+ XSRETURN(1);
+
+void
+validate_uid(SV *uid)
+PROTOTYPE: $
+PREINIT:
+ SV *ret;
+PPCODE:
+ ret = su_uid_validate(uid) ? &PL_sv_yes : &PL_sv_no;
+ EXTEND(SP, 1);
+ PUSHs(ret);
+ XSRETURN(1);