+/* --- Unique context ID global storage ------------------------------------ */
+
+/* ... Sequence ID counter ................................................. */
+
+/* Growable array of per-depth sequence counters: seqs[d] holds the last
+ * sequence number handed out for nesting depth d (see su_uid_seq_next). */
+typedef struct {
+ UV *seqs;    /* counter buffer, one slot per depth; PerlMemShared-allocated */
+ STRLEN size; /* number of slots currently allocated in seqs */
+} su_uv_array;
+
+/* Process-global sequence counter storage, shared across threads under
+ * ithreads (hence the mutex below). */
+STATIC su_uv_array su_uid_seq_counter;
+
+#ifdef USE_ITHREADS
+
+/* su_uid_seq_counter is shared between interpreter threads, so every
+ * access goes through this mutex. */
+STATIC perl_mutex su_uid_seq_counter_mutex;
+
+#define SU_LOCK(M) MUTEX_LOCK(M)
+#define SU_UNLOCK(M) MUTEX_UNLOCK(M)
+
+#else /* USE_ITHREADS */
+
+/* Single-threaded build: locking compiles away to nothing. */
+#define SU_LOCK(M)
+#define SU_UNLOCK(M)
+
+#endif /* !USE_ITHREADS */
+
+/* Return the next unique sequence number for the given nesting depth.
+ * Grows the shared counter array on demand so that seqs[depth] exists,
+ * then pre-increments and returns that slot.  Serialized through
+ * su_uid_seq_counter_mutex under ithreads. */
+STATIC UV su_uid_seq_next(pTHX_ UV depth) {
+#define su_uid_seq_next(D) su_uid_seq_next(aTHX_ (D))
+ UV seq;
+ UV *seqs;
+
+ SU_LOCK(&su_uid_seq_counter_mutex);
+
+ seqs = su_uid_seq_counter.seqs;
+
+ if (depth >= su_uid_seq_counter.size) {
+  UV i;
+
+  /* Grow to depth + 1 slots in shared memory.  On failure the old block,
+   * still referenced by su_uid_seq_counter.seqs, stays intact, so nothing
+   * leaks — but the result must be checked before it is dereferenced. */
+  seqs = PerlMemShared_realloc(seqs, (depth + 1) * sizeof(UV));
+  if (!seqs) {
+   /* Release the lock before croaking so other threads are not wedged. */
+   SU_UNLOCK(&su_uid_seq_counter_mutex);
+   croak("Out of memory while growing the UID sequence counter");
+  }
+  /* Zero the freshly added slots; realloc leaves them uninitialized. */
+  for (i = su_uid_seq_counter.size; i <= depth; ++i)
+   seqs[i] = 0;
+
+  su_uid_seq_counter.seqs = seqs;
+  su_uid_seq_counter.size = depth + 1;
+ }
+
+ seq = ++seqs[depth];
+
+ SU_UNLOCK(&su_uid_seq_counter_mutex);
+
+ return seq;
+}
+
+/* ... UID storage ......................................................... */
+
+/* A unique identifier for one scope depth: the sequence number handed
+ * out by su_uid_seq_next for that depth, plus state flags. */
+typedef struct {
+ UV seq;    /* sequence number obtained from su_uid_seq_next */
+ U32 flags; /* bitmask of SU_UID_* flags below */
+} su_uid;
+
+/* Flag bit: set while the UID refers to a live (still-entered) scope. */
+#define SU_UID_ACTIVE 1
+
+/* Translate a context index on the current stack into an overall nesting
+ * depth: the index itself, plus one slot per context frame sitting on
+ * every previous stackinfo in the chain. */
+STATIC UV su_uid_depth(pTHX_ I32 cxix) {
+#define su_uid_depth(I) su_uid_depth(aTHX_ (I))
+ const PERL_SI *prev_si = PL_curstackinfo->si_prev;
+ UV total = cxix;
+
+ while (prev_si) {
+  total += prev_si->si_cxix + 1;
+  prev_si = prev_si->si_prev;
+ }
+
+ return total;
+}
+
+/* Per-interpreter UID storage: a map from depth to (possibly NULL)
+ * heap-allocated su_uid entries. */
+typedef struct {
+ su_uid **map; /* map[depth] -> su_uid *, or NULL if never allocated */
+ STRLEN used;  /* number of depths currently in use */
+ STRLEN alloc; /* number of slots allocated in map (used <= alloc) */
+} su_uid_storage;
+
+/* Duplicate the UID storage OLD_CXT into NEW_CXT (e.g. when cloning an
+ * interpreter), copying at most MAX_DEPTH entries.  Active source entries
+ * are deep-copied (allocating destination slots lazily); every other slot
+ * already present in the destination merely has its ACTIVE flag cleared.
+ * Fix vs. previous revision: dropped the unused local `old_alloc`, which
+ * was read from old_cxt->alloc but never referenced (-Wunused-variable). */
+STATIC void su_uid_storage_dup(pTHX_ su_uid_storage *new_cxt, const su_uid_storage *old_cxt, UV max_depth) {
+#define su_uid_storage_dup(N, O, D) su_uid_storage_dup(aTHX_ (N), (O), (D))
+ su_uid **old_map = old_cxt->map;
+
+ if (old_map) {
+  su_uid **new_map = new_cxt->map;
+  STRLEN old_used = old_cxt->used;
+  STRLEN new_used, new_alloc;
+  STRLEN i;
+
+  /* Only copy entries that both exist in the source and fit under
+   * max_depth. */
+  new_used = max_depth < old_used ? max_depth : old_used;
+  new_cxt->used = new_used;
+
+  if (new_used <= new_cxt->alloc)
+   new_alloc = new_cxt->alloc;
+  else {
+   /* Grow the destination map, NULLing the fresh slots so the loop
+    * below can tell allocated entries from empty ones. */
+   new_alloc = new_used;
+   Renew(new_map, new_alloc, su_uid *);
+   for (i = new_cxt->alloc; i < new_alloc; ++i)
+    new_map[i] = NULL;
+   new_cxt->map = new_map;
+   new_cxt->alloc = new_alloc;
+  }
+
+  for (i = 0; i < new_alloc; ++i) {
+   su_uid *new_uid = new_map[i];
+
+   if (i < new_used) { /* => i < max_depth && i < old_used */
+    su_uid *old_uid = old_map[i];
+
+    if (old_uid && (old_uid->flags & SU_UID_ACTIVE)) {
+     /* Lazily allocate the destination entry, then copy by value. */
+     if (!new_uid) {
+      Newx(new_uid, 1, su_uid);
+      new_map[i] = new_uid;
+     }
+     *new_uid = *old_uid;
+     continue;
+    }
+   }
+
+   /* Out of range or inactive in the source: deactivate any leftover
+    * destination entry (its allocation is kept for reuse). */
+   if (new_uid)
+    new_uid->flags &= ~SU_UID_ACTIVE;
+  }
+ }
+
+ return;
+}
+