--- /dev/null
+blib*
+pm_to_blib*
+
+Makefile{,.old}
+Build
+_build*
+
+*.tar.gz
+Scalar-Vec-Util-*
+
+core.*
+*.{c,o,so,bs,out,def,exp}
+
+cover_db
+*.{gcda,gcov,gcno}
+
--- /dev/null
+Revision history for Scalar-Vec-Util
+
+0.01 2008-05-08 17:15 UTC
+ First version, released on an unsuspecting world.
+
--- /dev/null
+Changes
+MANIFEST
+Makefile.PL
+README
+Util.xs
+bitvect.h
+lib/Scalar/Vec/Util.pm
+samples/bench.pl
+t/00-load.t
+t/01-import.t
+t/02-pp.t
+t/03-size.t
+t/10-veq-pp.t
+t/11-veq.t
+t/12-veq-long.t
+t/20-vfill-pp.t
+t/21-vfill.t
+t/22-vfill-long.t
+t/30-vcopy-pp.t
+t/31-vcopy-copy.t
+t/32-vcopy-move.t
+t/33-vcopy-long.t
+t/90-boilerplate.t
+t/91-pod.t
+t/92-pod-coverage.t
+t/95-portability-files.t
+t/99-kwalitee.t
+META.yml Module meta-data (added by MakeMaker)
--- /dev/null
+--- #YAML:1.0
+name: Scalar-Vec-Util
+version: 0.01
+abstract: Utility routines for vec strings.
+license: perl
+author:
+ - Vincent Pit <perl@profvince.com>
+generated_by: ExtUtils::MakeMaker version 6.42
+distribution_type: module
+requires:
+ Carp: 0
+ Exporter: 0
+ XSLoader: 0
+meta-spec:
+ url: http://module-build.sourceforge.net/META-spec-v1.3.html
+ version: 1.3
+build_requires:
+ Config: 0
+ ExtUtils::MakeMaker: 0
+ Test::More: 0
--- /dev/null
+use strict;
+use warnings;
+use ExtUtils::MakeMaker;
+
+BEGIN {
+ eval { require Config };
+ die 'OS unsupported' if $@;
+ Config->import(qw/%Config/);
+ eval { require File::Spec };
+ die 'OS unsupported' if $@;
+}
+
+# Inspired from Module::Install::Can
+print "Checking for a valid C compiler in the PATH... ";
+my @ccs = ($Config{cc});
+unshift @ccs, $ENV{CC} if $ENV{CC};
+my $cc;
+CC:
+for my $c (@ccs) {
+ for my $dir (split /$Config{path_sep}/, $ENV{PATH}) {
+ my $abs = File::Spec->catfile($dir, $c);
+ if (-x $abs or MM->maybe_command($abs)) {
+ $cc = $c;
+ last CC;
+ }
+ }
+}
+my @C;
+if ($cc) {
+ push @C, 'Util.c';
+ print $cc, "\n";
+} else {
+ print "none\n";
+}
+
+my $arch = $Config{archname} || '';
+my ($cpu) = $arch =~ /^([^-]+)/;
+
+my @DEFINES;
+my $unit;
+if (unpack("h*", pack("s", 0x1234)) != 4321) {
+ print "Forcing unit size of 8 on non-little-endian systems.\n";
+ $unit = 8;
+} else {
+ my $align = int($Config{alignbytes} || 0);
+ print "Checking unit size in bits... ";
+ for (8, 16, 32, 64) {
+ my $size = int($Config{'u' . $_ . 'size'} || 0);
+ $unit = $_ if $size && $size <= $align;
+ }
+ print $unit, "\n";
+}
+push @DEFINES, DEFINE => '-DBV_UNIT="'
+ . ($unit == 64 ? 'uint64_t' : 'U' . $unit)
+ . '"'
+ . ' -DSVU_SIZE=' . $unit;
+
+my $BUILD_REQUIRES = {
+ 'Config' => 0,
+ 'ExtUtils::MakeMaker' => 0,
+ 'Test::More' => 0,
+};
+
+sub build_req {
+ my $tometa = ' >> $(DISTVNAME)/META.yml;';
+ my $build_req = 'echo "build_requires:" ' . $tometa;
+ foreach my $mod ( sort { lc $a cmp lc $b } keys %$BUILD_REQUIRES ) {
+ my $ver = $BUILD_REQUIRES->{$mod};
+ $build_req .= sprintf 'echo " %-30s %s" %s', "$mod:", $ver, $tometa;
+ }
+ return $build_req;
+}
+
+WriteMakefile(
+ NAME => 'Scalar::Vec::Util',
+ AUTHOR => 'Vincent Pit <perl@profvince.com>',
+ LICENSE => 'perl',
+ VERSION_FROM => 'lib/Scalar/Vec/Util.pm',
+ ABSTRACT_FROM => 'lib/Scalar/Vec/Util.pm',
+ PL_FILES => {},
+ C => \@C,
+ @DEFINES,
+ PREREQ_PM => {
+ 'Exporter' => 0,
+ 'Carp' => 0,
+ 'XSLoader' => 0
+ },
+ dist => {
+ PREOP => 'pod2text lib/Scalar/Vec/Util.pm > $(DISTVNAME)/README; '
+ . build_req,
+ COMPRESS => 'gzip -9f', SUFFIX => 'gz'
+ },
+ clean => { FILES => 'Scalar-Vec-Util-* *.gcov *.gcda *.gcno cover_db' },
+);
+
+1;
+
+package MY;
+
+sub postamble {
+ my $cv = join ' -coverage ', 'cover',
+ qw/statement branch condition path subroutine time/;
+ <<POSTAMBLE;
+cover test_cover:
+ $cv -test
+POSTAMBLE
+}
--- /dev/null
+NAME
+ Scalar::Vec::Util - Utility routines for vec strings.
+
+VERSION
+ Version 0.01
+
+SYNOPSIS
+ use Scalar::Vec::Util qw/vfill vcopy veq/;
+
+ my $s;
+ vfill $s, 0, 100, 1; # Fill with 100 bits 1 starting at 0.
+ my $t;
+ vcopy $s, 20, $t, 10, 30; # Copy 30 bits from $s, starting at 20,
+ # to $t, starting at 10.
+ vcopy $t, 10, $t, 20, 30; # Overlapping areas DWIM.
+ if (veq $t, 10, $t, 20, 30) { ... } # Yes, they are equal now.
+
+DESCRIPTION
+ A set of utilities to manipulate bits in vec strings. Highly optimized
+ XS routines are used when available, but straightforward pure perl
+ replacements are also provided for platforms without a C compiler.
+
+ This module doesn't reimplement bit vectors. It can be used on the very
+ same scalars that "vec" builds, or actually on any Perl string
+ ("SVt_PV").
+
+CONSTANTS
+ "SVU_PP"
+ True when pure perl fallbacks are used instead of XS functions.
+
+ "SVU_SIZE"
+ Size in bits of the unit used for moves. The higher this value is, the
+ faster the XS functions are. It's usually "CHAR_BIT *
+ $Config{alignbytes}", except on non-little-endian architectures where it
+ currently falls back to "CHAR_BIT" (e.g. SPARC).
+
+FUNCTIONS
+ "vfill $vec, $start, $length, $bit"
+ Starting at $start in $vec, fills $length bits with $bit. Grows $vec if
+ necessary.
+
+ "vcopy $from => $from_start, $to => $to_start, $length"
+ Copies $length bits starting at $from_start in $from to $to_start in
+ $to. If "$from_start + $length" is too long for $from, zeros are copied
+ past $length. Grows $to if necessary.
+
+ "veq $v1 => $v1_start, $v2 => $v2_start, $length"
+ Returns true if the $length bits starting at $v1_start in $v1 and
+ $v2_start in $v2 are equal, and false otherwise. If needed, $length is
+ decreased to fit inside $v1 and $v2 boundaries.
+
+EXPORT
+ The functions "vfill", "vcopy" and "veq" are only exported on request.
+ All of them are exported by the tags ':funcs' and ':all'.
+
+ The constants "SVU_PP" and "SVU_SIZE" are also only exported on request.
+ They are all exported by the tags ':consts' and ':all'.
+
+CAVEATS
+ Please report architectures where we can't use the alignment as the move
+ unit. I'll add exceptions for them.
+
+DEPENDENCIES
+ Carp, Exporter (core modules since perl 5), XSLoader (since perl 5.006).
+
+SEE ALSO
+ Bit::Vector gives a complete reimplementation of bit vectors.
+
+AUTHOR
+ Vincent Pit, "<perl at profvince.com>", <http://www.profvince.com>.
+
+ You can contact me by mail or on #perl @ FreeNode (vincent or
+ Prof_Vince).
+
+BUGS
+ Please report any bugs or feature requests to "bug-scalar-vec-util at
+ rt.cpan.org", or through the web interface at
+ <http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Scalar-Vec-Util>. I will
+ be notified, and then you'll automatically be notified of progress on
+ your bug as I make changes.
+
+SUPPORT
+ You can find documentation for this module with the perldoc command.
+
+ perldoc Scalar::Vec::Util
+
+ Tests code coverage report is available at
+ <http://www.profvince.com/perl/cover/Scalar-Vec-Util>.
+
+COPYRIGHT & LICENSE
+ Copyright 2008 Vincent Pit, all rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the same terms as Perl itself.
+
--- /dev/null
+/* This file is part of the Scalar::Vec::Util Perl module.
+ * See http://search.cpan.org/dist/Scalar-Vec-Util/ */
+
+#define PERL_NO_GET_CONTEXT
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#define __PACKAGE__ "Scalar::Vec::Util"
+
+#include "bitvect.h"
+
+STATIC const char svu_error_invarg[] = "Invalid argument";
+
+/* --- XS ------------------------------------------------------------------ */
+
+MODULE = Scalar::Vec::Util PACKAGE = Scalar::Vec::Util
+
+PROTOTYPES: ENABLE
+
+BOOT:
+{
+ HV *stash = gv_stashpv(__PACKAGE__, 1);
+ newCONSTSUB(stash, "SVU_PP", newSVuv(0));
+ newCONSTSUB(stash, "SVU_SIZE", newSVuv(SVU_SIZE));
+}
+
+void
+vfill(SV *sv, SV *ss, SV *sl, SV *sf)
+PREINIT:
+ size_t s, l, n, o;
+ char f, *v;
+CODE:
+ if (!SvOK(sv) || !SvOK(ss) || !SvOK(sl) || !SvOK(sf)) {
+ croak(svu_error_invarg);
+ }
+
+ l = SvUV(sl);
+ if (!l) { XSRETURN(0); }
+ s = SvUV(ss);
+ f = SvTRUE(sf);
+ if (SvTYPE(sv) < SVt_PV) { SvUPGRADE(sv, SVt_PV); }
+
+ n = BV_SIZE(s + l);
+ o = SvLEN(sv);
+ if (n > o) {
+ v = SvGROW(sv, n);
+ Zero(v + o, n - o, char);
+ } else {
+ v = SvPVX(sv);
+ }
+ if (SvCUR(sv) < n) {
+ SvCUR_set(sv, n);
+ }
+
+ bv_fill(v, s, l, f);
+
+ XSRETURN(0);
+
+void
+vcopy(SV *sf, SV *sfs, SV *st, SV *sts, SV *sl)
+PREINIT:
+ size_t fs, ts, l, lf = 0, n, o;
+ char *t, *f;
+CODE:
+ if (!SvOK(sf) || !SvOK(sfs) || !SvOK(st) || !SvOK(sts) || !SvOK(sl)) {
+ croak(svu_error_invarg);
+ }
+
+ l = SvUV(sl);
+ if (!l) { XSRETURN(0); }
+ fs = SvUV(sfs);
+ ts = SvUV(sts);
+ if (SvTYPE(sf) < SVt_PV) { SvUPGRADE(sf, SVt_PV); }
+ if (SvTYPE(st) < SVt_PV) { SvUPGRADE(st, SVt_PV); }
+
+ n = BV_SIZE(ts + l);
+ o = SvLEN(st);
+ if (n > o) {
+ t = SvGROW(st, n);
+ Zero(t + o, n - o, char);
+ } else {
+ t = SvPVX(st);
+ }
+ if (SvCUR(st) < n) {
+ SvCUR_set(st, n);
+ }
+ f = SvPVX(sf); /* We do it there in case st == sf. */
+
+ n = BV_SIZE(fs + l);
+ o = SvLEN(sf);
+ if (n > o) {
+ lf = fs + l - o * CHAR_BIT;
+ l = o * CHAR_BIT - fs;
+ }
+
+ if (f == t) {
+ bv_move(f, ts, fs, l);
+ } else {
+ bv_copy(t, ts, f, fs, l);
+ }
+
+ if (lf) {
+ bv_fill(t, ts + l, lf, 0);
+ }
+
+ XSRETURN(0);
+
+SV *
+veq(SV *sv1, SV *ss1, SV *sv2, SV *ss2, SV *sl)
+PREINIT:
+ size_t s1, s2, l, o, n;
+ char *v1, *v2;
+CODE:
+ if (!SvOK(sv1) || !SvOK(ss1) || !SvOK(sv2) || !SvOK(ss2) || !SvOK(sl)) {
+ croak(svu_error_invarg);
+ }
+
+ l = SvUV(sl);
+ s1 = SvUV(ss1);
+ s2 = SvUV(ss2);
+ if (SvTYPE(sv1) < SVt_PV) { SvUPGRADE(sv1, SVt_PV); }
+ if (SvTYPE(sv2) < SVt_PV) { SvUPGRADE(sv2, SVt_PV); }
+
+ n = BV_SIZE(s1 + l);
+ o = SvLEN(sv1);
+ if (n > o) {
+ l = o * CHAR_BIT - s1;
+ }
+
+ n = BV_SIZE(s2 + l);
+ o = SvLEN(sv2);
+ if (n > o) {
+ l = o * CHAR_BIT - s2;
+ }
+
+ v1 = SvPVX(sv1);
+ v2 = SvPVX(sv2);
+
+ RETVAL = newSVuv(bv_eq(v1, s1, v2, s2, l));
+OUTPUT:
+ RETVAL
--- /dev/null
+#ifndef BITVECT_H
+#define BITVECT_H 1
+
+/* === Bit vectors ========================================================= */
+
+/* ... Generic macros ...................................................... */
+
+#define INLINE_DECLARE(P) STATIC P
+#define INLINE_DEFINE
+
+#ifndef BV_UNIT
+# define BV_UNIT unsigned char
+#endif
+
+#define BV_SIZE(I) ((((I) % CHAR_BIT) != 0) + ((I) / CHAR_BIT))
+
+#define BITS(T) (CHAR_BIT * sizeof(T))
+
+/* 0 <= I < CHAR_BIT * sizeof(T) */
+#define BV_MASK_LOWER(T, I) (~((~((T) 0)) << (I)))
+/* 0 < I <= CHAR_BIT * sizeof(T) */
+#define BV_MASK_HIGHER(T, I) ((~((T) 0)) << (BITS(T) - (I)))
+
+#define BV_DO_ALIGNED(T, A) \
+ mask = BV_MASK_HIGHER(T, BITS(T) - fs); \
+ if (fs + l <= BITS(T)) { \
+ if (fs + l < BITS(T)) { \
+ /* Branching is apparently useless, \
+ * but since we can't portably shift \
+ * CHAR_BIT from a char... \
+ * Actually, we only copy up to this */ \
+ mask &= BV_MASK_LOWER(T, fs + l); \
+ } \
+ *t = (*t & ~mask) | (*f & mask); \
+ } else { \
+ size_t lo, lk; \
+ *t = (*t & ~mask) | (*f & mask); \
+ ++t; \
+ ++f; \
+ l -= (BITS(T) - ts); \
+ lo = l % BITS(T); \
+ lk = l / BITS(T); \
+ BV_##A##_UNIT_ALIGNED(T, t, f, lk); \
+ if (lo) { \
+ mask = BV_MASK_LOWER(T, lo); \
+ t[lk] = (t[lk] & ~mask) \
+ | (f[lk] & mask); \
+ } \
+ }
+
+#define BV_DO_LEFT_FORWARD(T, A) \
+ step = ts - fs; \
+ mask = BV_MASK_HIGHER(T, BITS(T) - ts); \
+ if (ts + l <= BITS(T)) { \
+ if (ts + l < BITS(T)) { \
+ mask &= BV_MASK_LOWER(T, ts + l); \
+ } \
+ *t = (*t & ~mask) | ((*f & (mask >> step)) << step); \
+ } else { \
+ size_t pets = BITS(T) - step; \
+ l -= (BITS(T) - ts); \
+ *t = (*t & ~mask) | ((*f & (mask >> step)) << step); \
+ ++t; \
+ if (l <= step) { \
+ mask = BV_MASK_LOWER(T, l); \
+ *t = (*t & ~mask) | ((*f & (mask << pets)) >> pets); \
+ } else { \
+ ins = (*f & BV_MASK_HIGHER(T, step)) >> pets; \
+ ++f; \
+ offset = l % BITS(T); \
+ end = t + l / BITS(T); \
+ while (t < end) { \
+ BV_##A##_UNIT_LEFT_FORWARD(T, t, f, step); \
+ ++t; ++f; \
+ } \
+ if (offset > step) { \
+ mask = BV_MASK_LOWER(T, offset - step); \
+ *t = (*t & (~mask << step)) \
+ | ((*f & mask) << step) \
+ | ins; \
+ } else if (offset) { \
+ mask = BV_MASK_LOWER(T, offset); \
+ *t = (*t & ~mask) | (ins & mask); \
+ } \
+ } \
+ }
+
+#define BV_DO_RIGHT_FORWARD(T, A) \
+ step = fs - ts; \
+ mask = BV_MASK_HIGHER(T, BITS(T) - fs); \
+ if (fs + l <= BITS(T)) { \
+ if (fs + l < BITS(T)) { \
+ mask &= BV_MASK_LOWER(T, fs + l); \
+ } \
+ *t = (*t & ~(mask >> step)) | ((*f & mask) >> step); \
+ } else { \
+ l -= (BITS(T) - fs); \
+ ins = ((*f & mask) >> step) | (*t & BV_MASK_LOWER(T, ts)); \
+ ++f; \
+ offset = l % BITS(T); \
+ end = f + l / BITS(T) + (offset > step); \
+ while (f < end) { \
+ BV_##A##_UNIT_RIGHT_FORWARD(T, t, f, step); \
+ ++t; ++f; \
+ } \
+ if (!offset) { offset += BITS(T); } \
+ if (offset > step) { \
+ mask = BV_MASK_LOWER(T, offset - step); \
+ *t = (*t & ~mask) | (ins & mask); \
+ } else { \
+ mask = BV_MASK_LOWER(T, offset); \
+ *t = (*t & (~mask << (BITS(T) - step))) \
+ | ((*f & mask) << (BITS(T) - step)) \
+ | ins; \
+ } \
+ }
+
+#define BV_DO_LEFT_BACKWARD(T, A) \
+ step = ts - fs; \
+ mask = BV_MASK_LOWER(T, BITS(T) - ts); \
+ if (ts + l <= BITS(T)) { \
+ if (ts + l < BITS(T)) { \
+ mask &= BV_MASK_HIGHER(T, ts + l); \
+ } \
+ *t = (*t & ~mask) | ((*f & (mask << step)) >> step); \
+ } else { \
+ size_t pets = BITS(T) - step; \
+ l -= (BITS(T) - ts); \
+ *t = (*t & ~mask) | ((*f & (mask << step)) >> step); \
+ --t; \
+ if (l <= step) { \
+ mask = BV_MASK_HIGHER(T, l); \
+ *t = (*t & ~mask) | ((*f & (mask >> pets)) << pets); \
+ } else { \
+ ins = (*f & BV_MASK_LOWER(T, step)) << pets; \
+ --f; \
+ offset = l % BITS(T); \
+ begin = t - l / BITS(T); \
+ while (t > begin) { \
+ BV_##A##_UNIT_LEFT_BACKWARD(T, t, f, step); \
+ --t; --f; \
+ } \
+ if (offset > step) { \
+ mask = BV_MASK_HIGHER(T, offset - step); \
+ *t = (*t & (~mask >> step)) \
+ | ((*f & mask) >> step) \
+ | ins; \
+ } else if (offset) { \
+ mask = BV_MASK_HIGHER(T, offset); \
+ *t = (*t & ~mask) | (ins & mask); \
+ } \
+ } \
+ }
+
+#define BV_DO_RIGHT_BACKWARD(T, A) \
+ step = fs - ts; \
+ mask = BV_MASK_LOWER(T, BITS(T) - fs); \
+ if (fs + l <= BITS(T)) { \
+ if (fs + l < BITS(T)) { \
+ mask &= BV_MASK_HIGHER(T, fs + l); \
+ } \
+ *t = (*t & ~(mask << step)) | ((*f & mask) << step); \
+ } else { \
+ l -= (BITS(T) - fs); \
+ ins = ((*f & mask) << step) | (*t & BV_MASK_HIGHER(T, ts)); \
+ --f; \
+ offset = l % BITS(T); \
+ begin = f - l / BITS(T) - (offset > step); \
+ while (f > begin) { \
+ BV_##A##_UNIT_RIGHT_BACKWARD(T, t, f, step); \
+ --t; --f; \
+ } \
+ if (!offset) { offset += BITS(T); } \
+ if (offset > step) { \
+ mask = BV_MASK_HIGHER(T, offset - step); \
+ *t = (*t & ~mask) | (ins & mask); \
+ } else { \
+ mask = BV_MASK_HIGHER(T, offset); \
+ *t = (*t & (~mask >> (BITS(T) - step))) \
+ | ((*f & mask) >> (BITS(T) - step)) \
+ | ins; \
+ } \
+ }
+
+/* ... Copy ................................................................. */
+
+#define BV_COPY_UNIT_ALIGNED(X, T, F, L) memcpy((T), (F), (L) * sizeof(X))
+
+/* Save the O - B higher bits, shift B bits left, add B bits from f at right */
+#define BV_COPY_UNIT_LEFT_FORWARD(X, T, F, B) \
+ *(T) = (*(F) << (B)) | ins; \
+ ins = *(F) >> (BITS(X) - (B));
+
+/* Save the B lower bits, shift B bits right, add B bits from F at left */
+#define BV_COPY_UNIT_RIGHT_FORWARD(X, T, F, B) \
+ *(T) = (*(F) << (BITS(X) - (B))) | ins; \
+ ins = *(F) >> (B);
+
+#define T BV_UNIT
+INLINE_DECLARE(void bv_copy(void *t_, size_t ts, const void *f_, size_t fs, size_t l))
+#ifdef INLINE_DEFINE
+{
+ size_t offset, step;
+ T ins, mask, *t = t_;
+ const T *f = f_, *end;
+
+ t += ts / BITS(T);
+ ts %= BITS(T);
+
+ f += fs / BITS(T);
+ fs %= BITS(T);
+
+ if (ts == fs) {
+ BV_DO_ALIGNED(T, COPY);
+ } else if (ts < fs) {
+ BV_DO_RIGHT_FORWARD(T, COPY);
+ } else { /* ts > fs */
+ BV_DO_LEFT_FORWARD(T, COPY);
+ }
+
+}
+#endif /* INLINE_DEFINE */
+#undef T
+
+/* ... Move ................................................................ */
+
+#define BV_MOVE_UNIT_ALIGNED(X, T, F, L) memmove((T), (F), (L) * sizeof(X))
+
+#define BV_MOVE_UNIT_LEFT_FORWARD(X, T, F, B) \
+ tmp = *(F) >> (BITS(X) - (B)); \
+ *(T) = (*(F) << (B)) | ins; \
+ ins = tmp;
+
+#define BV_MOVE_UNIT_RIGHT_FORWARD(X, T, F, B) \
+ tmp = *(F) >> (B); \
+ *(T) = (*(F) << (BITS(X) - (B))) | ins; \
+ ins = tmp;
+
+#define BV_MOVE_UNIT_LEFT_BACKWARD(X, T, F, B) \
+ tmp = *(F) << (BITS(X) - (B)); \
+ *(T) = (*(F) >> (B)) | ins; \
+ ins = tmp;
+
+#define BV_MOVE_UNIT_RIGHT_BACKWARD(X, T, F, B) \
+ tmp = *(F) << (B); \
+ *(T) = (*(F) >> (BITS(X) - (B))) | ins; \
+ ins = tmp;
+
+#define BV_MOVE_INIT_REVERSE(T, V, VS) \
+ z = (VS) + l; \
+ (VS) = z % BITS(T); \
+ if ((VS) > 0) { \
+ (V) = bv + (z / BITS(T)); \
+ (VS) = BITS(T) - (VS); \
+ } else { \
+ /* z >= BITS(T) because l > 0 */ \
+ (V) = bv + (z / BITS(T)) - 1; \
+ }
+
+#define T BV_UNIT
+INLINE_DECLARE(void bv_move(void *bv_, size_t ts, size_t fs, size_t l))
+#ifdef INLINE_DEFINE
+{
+ size_t to, fo, offset, step;
+ T ins, tmp, mask, *bv = bv_, *t, *f;
+ const T *begin, *end;
+
+ to = ts % BITS(T);
+ fo = fs % BITS(T);
+
+ if (to == fo) {
+ t = bv + ts / BITS(T);
+ ts = to;
+ f = bv + fs / BITS(T);
+ fs = fo;
+ BV_DO_ALIGNED(T, MOVE);
+ } else if (ts < fs) {
+ t = bv + ts / BITS(T);
+ ts = to;
+ f = bv + fs / BITS(T);
+ fs = fo;
+ if (ts < fs) {
+ BV_DO_RIGHT_FORWARD(T, MOVE);
+ } else { /* ts > fs */
+ BV_DO_LEFT_FORWARD(T, MOVE);
+ }
+ } else { /* ts > fs */
+ size_t z;
+ BV_MOVE_INIT_REVERSE(T, t, ts);
+ BV_MOVE_INIT_REVERSE(T, f, fs);
+ if (ts < fs) {
+ BV_DO_RIGHT_BACKWARD(T, MOVE);
+ } else { /* ts > fs */
+ BV_DO_LEFT_BACKWARD(T, MOVE);
+ }
+ }
+
+}
+#endif /* INLINE_DEFINE */
+#undef T
+
+/* ... Compare ............................................................. */
+
+#define BV_EQ(T, B1, B2) \
+ if (((T) (B1)) != ((T) (B2))) { return 0; }
+
+#define BV_EQ_MASK(T, B1, B2, M) BV_EQ(T, (B1) & (M), (B2) & (M))
+
+#define BV_EQ_LEFT(T, B1, B2, L, B) \
+ offset = (L) % BITS(T); \
+ end = (B1) + (L) / BITS(T); \
+ while ((B1) < end) { \
+ BV_EQ(T, *(B1), (*(B2) << (B)) | ins); \
+ ins = *(B2) >> (BITS(T) - (B)); \
+ ++(B1); ++(B2); \
+ } \
+ if (offset > (B)) { \
+ mask = BV_MASK_LOWER(T, offset - (B)); \
+ BV_EQ(T, *(B1) & ~(~mask << (B)), \
+ ((*(B2) & mask) << (B)) | ins); \
+ } else if (offset) { \
+ mask = BV_MASK_LOWER(T, offset); \
+ BV_EQ_MASK(T, *(B1), ins, mask); \
+ }
+
+#define BV_EQ_RIGHT(T, B1, B2, L, B) \
+ offset = (L) % BITS(T); \
+ end = (B2) + (L) / BITS(T) + (offset >= (B)); \
+ while ((B2) < end) { \
+ BV_EQ(T, *(B1), (*(B2) << (BITS(T) - (B))) | ins); \
+ ins = *(B2) >> (B); \
+ ++(B1); ++(B2); \
+ } \
+ if (!offset) { offset += BITS(T); } \
+ if (offset >= (B)) { \
+ mask = BV_MASK_LOWER(T, offset - (B)); \
+ BV_EQ_MASK(T, *(B1), ins, mask); \
+ } else { \
+ mask = BV_MASK_LOWER(T, offset); \
+ BV_EQ(T, *(B1) & ~(~mask << (BITS(T) - (B))), \
+ ((*(B2) & mask) << (BITS(T) - (B))) | ins); \
+ }
+
+#define T BV_UNIT
+INLINE_DECLARE(int bv_eq(const void *bv1_, size_t s1, const void *bv2_, size_t s2, size_t l))
+#ifdef INLINE_DEFINE
+{
+ size_t offset, step;
+ T ins, mask;
+ const T *bv1 = bv1_, *bv2 = bv2_, *end;
+
+ bv1 += s1 / BITS(T);
+ s1 %= BITS(T);
+
+ bv2 += s2 / BITS(T);
+ s2 %= BITS(T);
+
+ if (s1 == s2) {
+
+ mask = BV_MASK_HIGHER(T, BITS(T) - s2);
+ if (s2 + l <= BITS(T)) {
+ if (s2 + l < BITS(T)) {
+ mask &= BV_MASK_LOWER(T, s2 + l);
+ }
+ BV_EQ_MASK(T, *bv1, *bv2, mask);
+ } else {
+ int ret = 0;
+ size_t lo, lk;
+ BV_EQ_MASK(T, *bv1, *bv2, mask);
+ ++bv1;
+ ++bv2;
+ l -= (BITS(T) - s2);
+ lo = l % BITS(T);
+ lk = l / BITS(T);
+ if ((ret = memcmp(bv1, bv2, lk * sizeof(T))) != 0) { return 0; }
+ if (lo) {
+ mask = BV_MASK_LOWER(T, lo);
+ BV_EQ_MASK(T, *bv1, *bv2, mask);
+ }
+ }
+
+ } else if (s1 < s2) {
+
+ step = s2 - s1;
+ mask = BV_MASK_HIGHER(T, BITS(T) - s2);
+ if (s2 + l <= BITS(T)) {
+ if (s2 + l < BITS(T)) {
+ mask &= BV_MASK_LOWER(T, s2 + l);
+ }
+ BV_EQ(T, *bv1 & (mask >> step), (*bv2 & mask) >> step);
+ } else {
+ l -= (BITS(T) - s2);
+ ins = ((*bv2 & mask) >> step) | (*bv1 & BV_MASK_LOWER(T, s1));
+ ++bv2;
+ offset = l % BITS(T);
+ end = bv2 + l / BITS(T) + (offset >= step);
+ while (bv2 < end) {
+ BV_EQ(T, *bv1, (*bv2 << (BITS(T) - step)) | ins);
+ ins = *bv2 >> step;
+ ++bv1; ++bv2;
+ }
+ if (!offset) { offset += BITS(T); }
+ if (offset >= step) {
+ mask = BV_MASK_LOWER(T, offset - step);
+ BV_EQ_MASK(T, *bv1, ins, mask);
+ } else {
+ mask = BV_MASK_LOWER(T, offset);
+ BV_EQ(T, *bv1 & ~(~mask << (BITS(T) - step)),
+ ((*bv2 & mask) << (BITS(T) - step)) | ins);
+ }
+ }
+
+ } else { /* s1 > s2 */
+
+ step = s1 - s2;
+ mask = BV_MASK_HIGHER(T, BITS(T) - s1);
+ if (s1 + l <= BITS(T)) {
+ if (s1 + l < BITS(T)) {
+ mask &= BV_MASK_LOWER(T, s1 + l);
+ }
+ BV_EQ(T, *bv1 & mask, (*bv2 & (mask >> step)) << step);
+ } else {
+ size_t pets = BITS(T) - step;
+ l -= (BITS(T) - s1);
+ BV_EQ(T, *bv1 & mask, (*bv2 & (mask >> step)) << step);
+ ++bv1;
+ if (l <= step) {
+ mask = BV_MASK_LOWER(T, l);
+ BV_EQ(T, *bv1 & mask, (*bv2 & (mask << pets)) >> pets);
+ } else {
+ ins = (*bv2 & BV_MASK_HIGHER(T, step)) >> pets;
+ ++bv2;
+ offset = l % BITS(T);
+ end = bv1 + l / BITS(T);
+ while (bv1 < end) {
+ BV_EQ(T, *bv1, (*bv2 << step) | ins);
+ ins = *bv2 >> (BITS(T) - step);
+ ++bv1; ++bv2;
+ }
+ if (offset > step) {
+ mask = BV_MASK_LOWER(T, offset - step);
+ BV_EQ(T, *bv1 & ~(~mask << step),
+ ((*bv2 & mask) << step) | ins);
+ } else if (offset) {
+ mask = BV_MASK_LOWER(T, offset);
+ BV_EQ_MASK(T, *bv1, ins, mask);
+ }
+ }
+ }
+
+ }
+
+ return 1;
+}
+#endif /* INLINE_DEFINE */
+#undef T
+
+/* ... Fill ................................................................ */
+
+#define T unsigned char
+INLINE_DECLARE(void bv_fill(void *bv_, size_t s, size_t l, unsigned int f))
+#ifdef INLINE_DEFINE
+{
+ size_t o, k;
+ T mask, *bv = bv_;
+
+ if (f) { f = ~0; }
+
+ bv += s / BITS(T);
+ o = s % BITS(T);
+
+ mask = BV_MASK_HIGHER(T, BITS(T) - o);
+ if (o + l <= BITS(T)) {
+ if (o + l < BITS(T)) {
+ mask &= BV_MASK_LOWER(T, o + l);
+ }
+ *bv = (*bv & ~mask) | (f & mask);
+ } else {
+ *bv = (*bv & ~mask) | (f & mask);
+ ++bv;
+ l -= (BITS(T) - o);
+ k = l / BITS(T);
+ o = l % BITS(T);
+ memset(bv, f, k);
+ if (o) {
+ mask = BV_MASK_LOWER(T, o);
+ bv += k;
+ *bv = (*bv & ~mask) | (f & mask);
+ }
+ }
+
+}
+#endif /* INLINE_DEFINE */
+#undef T
+
+/* ========================================================================= */
+
+#endif /* BITVECT_H */
--- /dev/null
+package Scalar::Vec::Util;
+
+use strict;
+use warnings;
+
+use Carp qw/croak/;
+
+=head1 NAME
+
+Scalar::Vec::Util - Utility routines for vec strings.
+
+=head1 VERSION
+
+Version 0.01
+
+=cut
+
+our $VERSION;
+BEGIN {
+ $VERSION = '0.01';
+ eval {
+ require XSLoader;
+ XSLoader::load(__PACKAGE__, $VERSION);
+ 1;
+ } or do {
+ sub SVU_PP () { 1 }
+ sub SVU_SIZE () { 1 }
+ *vfill = *vfill_pp;
+ *vcopy = *vcopy_pp;
+ *veq = *veq_pp;
+ }
+}
+
+=head1 SYNOPSIS
+
+ use Scalar::Vec::Util qw/vfill vcopy veq/;
+
+ my $s;
+ vfill $s, 0, 100, 1; # Fill with 100 bits 1 starting at 0.
+ my $t;
+ vcopy $s, 20, $t, 10, 30; # Copy 30 bits from $s, starting at 20,
+ # to $t, starting at 10.
+ vcopy $t, 10, $t, 20, 30; # Overlapping areas DWIM.
+ if (veq $t, 10, $t, 20, 30) { ... } # Yes, they are equal now.
+
+=head1 DESCRIPTION
+
+A set of utilities to manipulate bits in vec strings. Highly optimized XS routines are used when available, but straightforward pure perl replacements are also provided for platforms without a C compiler.
+
+This module doesn't reimplement bit vectors. It can be used on the very same scalars that C<vec> builds, or actually on any Perl string (C<SVt_PV>).
+
+=head1 CONSTANTS
+
+=head2 C<SVU_PP>
+
+True when pure perl fallbacks are used instead of XS functions.
+
+=head2 C<SVU_SIZE>
+
+Size in bits of the unit used for moves. The higher this value is, the faster the XS functions are. It's usually C<CHAR_BIT * $Config{alignbytes}>, except on non-little-endian architectures where it currently falls back to C<CHAR_BIT> (e.g. SPARC).
+
+=head1 FUNCTIONS
+
+=head2 C<vfill $vec, $start, $length, $bit>
+
+Starting at C<$start> in C<$vec>, fills C<$length> bits with C<$bit>. Grows C<$vec> if necessary.
+
+=cut
+
+# Return true iff every element of @_ is defined. Used by the *_pp
+# functions below to reject undef arguments, mirroring the XS functions'
+# "Invalid argument" croak.
+sub _alldef {
+ for (@_) { return 0 unless defined }
+ return 1;
+}
+
+# Pure Perl fallback for vfill(): starting at bit $s of $_[0], set $l
+# bits to the truth value of $x, one bit at a time with vec().
+# $_[0] is deliberately left in @_ (not copied into a lexical) so the
+# vec() lvalue writes into the caller's scalar; vec() autoextends it.
+sub vfill_pp {
+ (undef, my $s, my $l, my $x) = @_;
+ croak "Invalid argument" unless _alldef @_;
+ # A zero length is a no-op, not an error.
+ return unless $l;
+ # Normalize the fill value to a single bit.
+ $x = 1 if $x;
+ vec($_[0], $_, 1) = $x for $s .. $s + $l - 1;
+}
+
+=head2 C<< vcopy $from => $from_start, $to => $to_start, $length >>
+
+Copies C<$length> bits starting at C<$from_start> in C<$from> to C<$to_start> in C<$to>. If C<$from_start + $length> is too long for C<$from>, zeros are copied past C<$length>. Grows C<$to> if necessary.
+
+=cut
+
+# Pure Perl fallback for vcopy(): copy $l bits from $_[0] starting at
+# $fs into $_[2] starting at $ts, bit by bit with vec(). Source and
+# destination are kept as @_ aliases so the same scalar may be passed
+# for both (overlapping move semantics).
+sub vcopy_pp {
+ my ($fs, $ts, $l) = @_[1, 3, 4];
+ croak "Invalid argument" unless _alldef @_;
+ return unless $l;
+ # $step is the displacement of the destination relative to the source;
+ # its sign decides the copy direction so overlapping regions DWIM.
+ my $step = $ts - $fs;
+ if ($step <= 0) {
+ vec($_[2], $_ + $step, 1) = vec($_[0], $_, 1) for $fs .. $fs + $l - 1;
+ } else { # There's a risk of overwriting if $_[0] and $_[2] are the same SV.
+ vec($_[2], $_ + $step, 1) = vec($_[0], $_, 1) for reverse $fs .. $fs + $l - 1;
+ }
+}
+
+=head2 C<< veq $v1 => $v1_start, $v2 => $v2_start, $length >>
+
+Returns true if the C<$length> bits starting at C<$v1_start> in C<$v1> and C<$v2_start> in C<$v2> are equal, and false otherwise. If needed, C<$length> is decreased to fit inside C<$v1> and C<$v2> boundaries.
+
+=cut
+
+# Pure Perl fallback for veq(): compare $l bits of $_[0] starting at
+# $s1 against $l bits of $_[2] starting at $s2, one bit at a time.
+# NOTE(review): unlike the XS veq, $l is not explicitly clamped to the
+# strings' boundaries here; this relies on vec() reading past the end of
+# a scalar as 0 bits — confirm the two implementations agree at the edges.
+sub veq_pp {
+ my ($s1, $s2, $l) = @_[1, 3, 4];
+ croak "Invalid argument" unless _alldef @_;
+ my $i = 0;
+ while ($i < $l) {
+ return 0 if vec($_[0], $s1 + $i, 1) != vec($_[2], $s2 + $i, 1);
+ ++$i;
+ }
+ return 1;
+}
+
+=head1 EXPORT
+
+The functions L</vfill>, L</vcopy> and L</veq> are only exported on request. All of them are exported by the tags C<':funcs'> and C<':all'>.
+
+The constants L</SVU_PP> and L</SVU_SIZE> are also only exported on request. They are all exported by the tags C<':consts'> and C<':all'>.
+
+=cut
+
+use base qw/Exporter/;
+
+our @EXPORT = ();
+our %EXPORT_TAGS = (
+ 'funcs' => [ qw/vfill vcopy veq/ ],
+ 'consts' => [ qw/SVU_PP SVU_SIZE/ ]
+);
+our @EXPORT_OK = map { @$_ } values %EXPORT_TAGS;
+$EXPORT_TAGS{'all'} = [ @EXPORT_OK ];
+
+=head1 CAVEATS
+
+Please report architectures where we can't use the alignment as the move unit. I'll add exceptions for them.
+
+=head1 DEPENDENCIES
+
+L<Carp>, L<Exporter> (core modules since perl 5), L<XSLoader> (since perl 5.006).
+
+=head1 SEE ALSO
+
+L<Bit::Vector> gives a complete reimplementation of bit vectors.
+
+=head1 AUTHOR
+
+Vincent Pit, C<< <perl at profvince.com> >>, L<http://www.profvince.com>.
+
+You can contact me by mail or on #perl @ FreeNode (vincent or Prof_Vince).
+
+=head1 BUGS
+
+Please report any bugs or feature requests to C<bug-scalar-vec-util at rt.cpan.org>, or through the web interface at L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Scalar-Vec-Util>. I will be notified, and then you'll automatically be notified of progress on your bug as I make changes.
+
+=head1 SUPPORT
+
+You can find documentation for this module with the perldoc command.
+
+ perldoc Scalar::Vec::Util
+
+Tests code coverage report is available at L<http://www.profvince.com/perl/cover/Scalar-Vec-Util>.
+
+=head1 COPYRIGHT & LICENSE
+
+Copyright 2008 Vincent Pit, all rights reserved.
+
+This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself.
+
+=cut
+
+1; # End of Scalar::Vec::Util
--- /dev/null
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+
+use Benchmark qw/cmpthese/;
+
+use lib qw{blib/arch blib/lib};
+use Scalar::Vec::Util qw/vfill vcopy veq/;
+
+my $n = 100_000;
+my $i = 0;
+my $x = '';
+
+# Advance a benchmark offset in place: increment $_[0] (an alias to the
+# caller's counter) and wrap it back to 0 once it reaches $n, returning
+# the new value. Keeps successive benchmark iterations inside the vector.
+sub inc {
+ ++$_[0];
+ $_[0] = 0 if $_[0] >= $n;
+ return $_[0];
+}
+
+print "fill:\n";
+cmpthese -3, {
+ vfill => sub { vfill $x, inc($i), $n - $i, 1; },
+ vfill_pp => sub { Scalar::Vec::Util::vfill_pp($x, inc($i), $n - $i, 1); }
+};
+
+$i = 0;
+my $j = int $n / 2;
+my $y = '';
+print "\ncopy:\n";
+cmpthese -3, {
+ vcopy => sub { vcopy $x, inc($i), $y, inc($j), $n - ($i > $j ? $i : $j); },
+ vcopy_pp => sub { Scalar::Vec::Util::vcopy_pp($x, inc($i), $y, inc($j), $n - ($i > $j ? $i : $j)); }
+};
+
+$i = 0;
+$j = int $n / 2;
+print "\nmove:\n";
+cmpthese -3, {
+ vcopy => sub { vcopy $x, inc($i), $x, inc($j), $n - ($i > $j ? $i : $j); },
+ vcopy_pp => sub { Scalar::Vec::Util::vcopy_pp($x, inc($i), $x, inc($j), $n - ($i > $j ? $i : $j)); }
+};
+
+$i = 0;
+$j = int $n / 2;
+vfill $x, 0, $n, 1;
+vfill $y, 0, $n, 1;
+print "\neq:\n";
+cmpthese -3, {
+ veq => sub { veq $x, inc($i), $y, inc($j), $n - ($i > $j ? $i : $j); },
+ veq_pp => sub { Scalar::Vec::Util::veq_pp($x, inc($i), $y, inc($j), $n - ($i > $j ? $i : $j)); }
+};
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More tests => 1;
+
+BEGIN {
+ use_ok( 'Scalar::Vec::Util' );
+}
+
+diag( "Testing Scalar::Vec::Util $Scalar::Vec::Util::VERSION, Perl $], $^X" );
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More tests => 5;
+
+require Scalar::Vec::Util;
+
+for (qw/vfill vcopy veq SVU_PP SVU_SIZE/) {
+ eval { Scalar::Vec::Util->import($_) };
+ ok(!$@, 'import ' . $_);
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More tests => 4;
+
+BEGIN { @INC = grep !/arch$/, @INC }
+use Scalar::Vec::Util qw/vfill vcopy veq SVU_PP/;
+
+is(SVU_PP, 1, 'using pure perl subroutines');
+for (qw/vfill vcopy veq/) {
+ no strict 'refs';
+ is(*{$_}{CODE}, *{'Scalar::Vec::Util::'.$_}{CODE}, $_ .' is ' . $_ . '_pp');
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More;
+
+use Scalar::Vec::Util qw/SVU_SIZE SVU_PP/;
+
+if (SVU_PP) {
+ plan tests => 1;
+
+ diag('Using pure perl fallbacks');
+
+ is(SVU_SIZE, 1, 'SVU_SIZE is 1');
+} else {
+ plan tests => 2;
+
+ diag('Using an unit of ' . SVU_SIZE . ' bits');
+
+ ok(SVU_SIZE >= 8, 'SVU_SIZE is greater than 8');
+ is(SVU_SIZE % 8, 0, 'SVU_SIZE is a multiple of 8');
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More 'no_plan';
+
+use Scalar::Vec::Util qw/SVU_SIZE/;
+
+eval { Scalar::Vec::Util::veq_pp(undef, 0, my $y, 0, 0) };
+like($@, qr/Invalid\s+argument/, 'first argument undef croaks');
+eval { Scalar::Vec::Util::veq_pp(my $x, undef, my $y, 0, 0) };
+like($@, qr/Invalid\s+argument/, 'second argument undef croaks');
+eval { Scalar::Vec::Util::veq_pp(my $x, 0, undef, 0, 0) };
+like($@, qr/Invalid\s+argument/, 'third argument undef croaks');
+eval { Scalar::Vec::Util::veq_pp(my $x, 0, my $y, undef, 0) };
+like($@, qr/Invalid\s+argument/, 'fourth argument undef croaks');
+eval { Scalar::Vec::Util::veq_pp(my $x, 0, my $y, 0, undef) };
+like($@, qr/Invalid\s+argument/, 'fifth argument undef croaks');
+
+my $p = SVU_SIZE;
+$p = 8 if $p < 8;
+my $n = 3 * $p;
+my $q = 1;
+
+*myfill = *Scalar::Vec::Util::vfill_pp;
+
+sub rst { myfill($_[0], 0, $n, 0) }
+
+sub pat {
+ (undef, my $a, my $b, my $x) = @_;
+ myfill($_[0], 0, $a, $x);
+ myfill($_[0], $a, $b, 1 - $x);
+ myfill($_[0], $a + $b, $n - ($a + $b) , $x);
+}
+
+my ($v1, $v2) = ('') x 2;
+
+my @s = ($p - $q) .. ($p + $q);
+for my $s1 (@s) {
+ for my $s2 (@s) {
+ for my $l (0 .. $n - 1) {
+ last if $s1 + $l > $n or $s2 + $l > $n;
+ pat $v1, $s1, $l, 0;
+ pat $v2, $s2, $l, 0;
+ ok(Scalar::Vec::Util::veq_pp($v1 => $s1, $v2 => $s2, $l), "veq_pp $s1, $s2, $l");
+ ok(!Scalar::Vec::Util::veq_pp($v1 => $s1 - 1, $v2 => $s2, $l), 'not veq_pp ' . ($s1 - 1) . ", $s2, $l") if $l > 0;
+ ok(!Scalar::Vec::Util::veq_pp($v1 => $s1 + 1, $v2 => $s2, $l), 'not veq_pp ' . ($s1 + 1) . ", $s2, $l") if $l > 0;
+ }
+ }
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More 'no_plan';
+
+use Scalar::Vec::Util qw/veq SVU_SIZE/;
+
+# Croak tests: veq() must reject undef in any of its five arguments.
+eval { veq undef, 0, my $y, 0, 0 };
+like($@, qr/Invalid\s+argument/, 'first argument undef croaks');
+eval { veq my $x, undef, my $y, 0, 0 };
+like($@, qr/Invalid\s+argument/, 'second argument undef croaks');
+eval { veq my $x, 0, undef, 0, 0 };
+like($@, qr/Invalid\s+argument/, 'third argument undef croaks');
+eval { veq my $x, 0, my $y, undef, 0 };
+like($@, qr/Invalid\s+argument/, 'fourth argument undef croaks');
+eval { veq my $x, 0, my $y, 0, undef };
+like($@, qr/Invalid\s+argument/, 'fifth argument undef croaks');
+
+# Vectors of n = 3 * p bits (p = SVU_SIZE, clamped to at least 8);
+# offsets probed in a +/- q bit window around p.
+my $p = SVU_SIZE;
+$p = 8 if $p < 8;
+my $n = 3 * $p;
+my $q = 1;
+
+*myfill = *Scalar::Vec::Util::vfill_pp;
+
+# Reset all n bits of the vector to 0 (not called in this test's loop).
+sub rst { myfill($_[0], 0, $n, 0) }
+
+# pat($vec, $a, $b, $x): bits [0,$a) and [$a+$b,$n) are set to $x,
+# bits [$a,$a+$b) to the complement 1-$x.
+sub pat {
+ (undef, my $a, my $b, my $x) = @_;
+ myfill($_[0], 0, $a, $x);
+ myfill($_[0], $a, $b, 1 - $x);
+ myfill($_[0], $a + $b, $n - ($a + $b) , $x);
+}
+
+my ($v1, $v2) = ('') x 2;
+
+# Same checks as t/10-veq-pp.t, but through the public veq() entry point.
+my @s = ($p - $q) .. ($p + $q);
+for my $s1 (@s) {
+ for my $s2 (@s) {
+  for my $l (0 .. $n - 1) {
+   last if $s1 + $l > $n or $s2 + $l > $n;
+   pat $v1, $s1, $l, 0;
+   pat $v2, $s2, $l, 0;
+   ok(veq($v1 => $s1, $v2 => $s2, $l), "veq $s1, $s2, $l");
+   ok(!veq($v1 => $s1 - 1, $v2 => $s2, $l), 'not veq ' . ($s1 - 1) . ", $s2, $l") if $l > 0;
+   ok(!veq($v1 => $s1 + 1, $v2 => $s2, $l), 'not veq ' . ($s1 + 1) . ", $s2, $l") if $l > 0;
+  }
+ }
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More tests => 1;
+use Config qw/%Config/;
+
+use Scalar::Vec::Util qw/veq/;
+
+my ($v1, $v2) = ('') x 2;
+# Pick a bit count derived from the platform alignment, so the buffers
+# are deliberately not a "nice" aligned size.
+my $n = ($Config{alignbytes} - 1) * 8;
+vec($v1, $_, 1) = 0 for 0 .. $n - 1;
+vec($v2, $_, 1) = 0 for 0 .. $n - 1;
+# Set bits at positions n/2, n/4, ... in $v1, and the same positions
+# shifted by one bit in $v2, so the vectors match under a 1-bit offset.
+my $i = $n / 2;
+while ($i >= 1) {
+ vec($v1, $i, 1) = 1;
+ vec($v2, $i + 1, 1) = 1;
+ $i /= 2;
+}
+# Compare over 10 * n bits, far past the bits written above; presumably
+# relies on trailing bits reading as 0 in both vectors — confirm against
+# the veq() documentation.
+ok(veq($v1, 0, $v2, 1, 10 * $n), 'long veq is loooong');
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More 'no_plan';
+
+use Scalar::Vec::Util;
+
+# Croak tests: vfill_pp() must reject undef in any of its four arguments.
+eval { Scalar::Vec::Util::vfill_pp(undef, 0, 0, 0) };
+like($@, qr/Invalid\s+argument/, 'first argument undef croaks');
+eval { Scalar::Vec::Util::vfill_pp(my $x, undef, 0, 0) };
+like($@, qr/Invalid\s+argument/, 'second argument undef croaks');
+eval { Scalar::Vec::Util::vfill_pp(my $x, 0, undef, 0) };
+like($@, qr/Invalid\s+argument/, 'third argument undef croaks');
+eval { Scalar::Vec::Util::vfill_pp(my $x, 0, 0, undef) };
+like($@, qr/Invalid\s+argument/, 'fourth argument undef croaks');
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More 'no_plan';
+
+use Scalar::Vec::Util qw/vfill SVU_SIZE/;
+
+# Croak tests: vfill() must reject undef in any of its four arguments.
+eval { vfill undef, 0, 0, 0 };
+like($@, qr/Invalid\s+argument/, 'first argument undef croaks');
+eval { vfill my $x, undef, 0, 0 };
+like($@, qr/Invalid\s+argument/, 'second argument undef croaks');
+eval { vfill my $x, 0, undef, 0 };
+like($@, qr/Invalid\s+argument/, 'third argument undef croaks');
+eval { vfill my $x, 0, 0, undef };
+like($@, qr/Invalid\s+argument/, 'fourth argument undef croaks');
+
+# Vectors of n = 3 * p bits (p = SVU_SIZE, clamped to at least 8);
+# fill offsets probed in a +/- q bit window around p.
+my $p = SVU_SIZE;
+$p = 8 if $p < 8;
+my $n = 3 * $p;
+my $q = 1;
+
+# Use the pure-Perl reference implementations to build expected values.
+*myfill = *Scalar::Vec::Util::vfill_pp;
+*myeq = *Scalar::Vec::Util::veq_pp;
+
+# Clear the vector and truncate it back to an empty string.
+sub rst { myfill($_[0], 0, $n, 0); $_[0] = '' }
+
+# pat($vec, $a, $b, $x): start from an empty string; when $b is non-zero,
+# set bits [0,$a) to $x and bits [$a,$a+$b) to 1-$x.
+sub pat {
+ (undef, my $a, my $b, my $x) = @_;
+ $_[0] = '';
+ if ($b) {
+  myfill($_[0], 0, $a, $x);
+  myfill($_[0], $a, $b, 1 - $x);
+ }
+}
+
+my ($v, $c) = ('') x 2;
+
+# vfill() on a reset vector must match the reference pattern built with
+# vfill_pp, both in bit content and in resulting string length.
+my @s = ($p - $q) .. ($p + $q);
+for my $s (@s) {
+ for my $l (0 .. $n - 1) {
+  next if $s + $l > $n;
+  pat $c, $s, $l, 0;
+  rst $v;
+  vfill $v, $s, $l, 1;
+  ok(myeq($v, 0, $c, 0, $n), "vfill $s, $l");
+  is(length $v, length $c, "length is ok");
+ }
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+# 17 doubling steps (1 .. 2**16) times 2 assertions per step = 34 tests.
+use Test::More tests => 34;
+
+use Scalar::Vec::Util qw/vfill/;
+
+my $n = 2 ** 16;
+
+# Pure-Perl reference implementations used to build/check expected values.
+*myfill = *Scalar::Vec::Util::vfill_pp;
+*myeq = *Scalar::Vec::Util::veq_pp;
+
+my ($v, $c) = ('') x 2;
+
+# Fill geometrically growing lengths and compare vfill() against the
+# reference vfill_pp() result, content and length both.
+my $l = 1;
+while ($l <= $n) {
+ myfill($c, 0, $l, 1);
+ vfill($v, 0, $l, 1);
+ ok(myeq($v, 0, $c, 0, $l), "vfill 0, $l, 1");
+ is(length $v, length $c, "length is ok");
+ $l *= 2;
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More 'no_plan';
+
+use Scalar::Vec::Util qw/SVU_SIZE/;
+
+# Croak tests: vcopy_pp() must reject undef in any of its five arguments.
+eval { Scalar::Vec::Util::vcopy_pp(undef, 0, my $y, 0, 0) };
+like($@, qr/Invalid\s+argument/, 'first argument undef croaks');
+eval { Scalar::Vec::Util::vcopy_pp(my $x, undef, my $y, 0, 0) };
+like($@, qr/Invalid\s+argument/, 'second argument undef croaks');
+eval { Scalar::Vec::Util::vcopy_pp(my $x, 0, undef, 0, 0) };
+like($@, qr/Invalid\s+argument/, 'third argument undef croaks');
+eval { Scalar::Vec::Util::vcopy_pp(my $x, 0, my $y, undef, 0) };
+like($@, qr/Invalid\s+argument/, 'fourth argument undef croaks');
+eval { Scalar::Vec::Util::vcopy_pp(my $x, 0, my $y, 0, undef) };
+like($@, qr/Invalid\s+argument/, 'fifth argument undef croaks');
+
+# Vectors of n = 3 * p bits (p = SVU_SIZE, clamped to at least 8);
+# source/destination offsets probed in a +/- q bit window around p.
+my $p = SVU_SIZE;
+$p = 8 if $p < 8;
+my $n = 3 * $p;
+my $q = 1;
+
+*myfill = *Scalar::Vec::Util::vfill_pp;
+
+# Reset all n bits of the vector to 0.
+sub rst { myfill($_[0], 0, $n, 0) }
+
+# pat($vec, $a, $b, $x): bits [0,$a) and [$a+$b,$n) are set to $x,
+# bits [$a,$a+$b) to the complement 1-$x.
+sub pat {
+ (undef, my $a, my $b, my $x) = @_;
+ myfill($_[0], 0, $a, $x);
+ myfill($_[0], $a, $b, 1 - $x);
+ myfill($_[0], $a + $b, $n - ($a + $b) , $x);
+}
+
+my ($f, $t, $c) = ('') x 3;
+
+# Copying the pattern from offset $s1 in $f to offset $s2 in a cleared $t
+# must reproduce the same pattern built directly at offset $s2 in $c.
+my @s = ($p - $q) .. ($p + $q);
+for my $s1 (@s) {
+ for my $s2 (@s) {
+  for my $l (0 .. $n - 1) {
+   last if $s1 + $l > $n or $s2 + $l > $n;
+   pat $f, $s1, $l, 0;
+   rst $t;
+   pat $c, $s2, $l, 0;
+   Scalar::Vec::Util::vcopy_pp($f => $s1, $t => $s2, $l);
+   is($t, $c, "vcopy_pp $s1, $s2, $l");
+  }
+ }
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More 'no_plan';
+
+use Scalar::Vec::Util qw/vcopy SVU_SIZE/;
+
+# Croak tests: vcopy() must reject undef in any of its five arguments.
+eval { vcopy undef, 0, my $y, 0, 0 };
+like($@, qr/Invalid\s+argument/, 'first argument undef croaks');
+eval { vcopy my $x, undef, my $y, 0, 0 };
+like($@, qr/Invalid\s+argument/, 'second argument undef croaks');
+eval { vcopy my $x, 0, undef, 0, 0 };
+like($@, qr/Invalid\s+argument/, 'third argument undef croaks');
+eval { vcopy my $x, 0, my $y, undef, 0 };
+like($@, qr/Invalid\s+argument/, 'fourth argument undef croaks');
+eval { vcopy my $x, 0, my $y, 0, undef };
+like($@, qr/Invalid\s+argument/, 'fifth argument undef croaks');
+
+# Vectors of n = 3 * p bits (p = SVU_SIZE, clamped to at least 8);
+# source/destination offsets probed in a +/- q bit window around p.
+my $p = SVU_SIZE;
+$p = 8 if $p < 8;
+my $n = 3 * $p;
+my $q = 1;
+
+# Pure-Perl reference implementations used to build/check expected values.
+*myfill = *Scalar::Vec::Util::vfill_pp;
+*myeq = *Scalar::Vec::Util::veq_pp;
+
+# Clear the vector and truncate it back to an empty string.
+sub rst { myfill($_[0], 0, $n, 0); $_[0] = '' }
+
+# pat($vec, $a, $b, $x): start from an empty string; when $b is non-zero,
+# set bits [0,$a) to $x and bits [$a,$a+$b) to 1-$x.
+sub pat {
+ (undef, my $a, my $b, my $x) = @_;
+ $_[0] = '';
+ if ($b) {
+  myfill($_[0], 0, $a, $x);
+  myfill($_[0], $a, $b, 1 - $x);
+ }
+}
+
+my ($f, $t, $c) = ('') x 3;
+
+# Copying the pattern from offset $s1 in $f to offset $s2 in a cleared $t
+# must match the pattern built directly at $s2, content and length both.
+my @s = ($p - $q) .. ($p + $q);
+for my $s1 (@s) {
+ for my $s2 (@s) {
+  for my $l (0 .. $n - 1) {
+   last if $s1 + $l > $n or $s2 + $l > $n;
+   pat $f, $s1, $l, 0;
+   rst $t;
+   pat $c, $s2, $l, 0;
+   vcopy $f => $s1, $t => $s2, $l;
+   ok(myeq($t, 0, $c, 0, $n), "vcopy $s1, $s2, $l");
+   is(length $t, length $c, "length is ok");
+  }
+ }
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More 'no_plan';
+
+use Scalar::Vec::Util qw/vcopy SVU_SIZE/;
+
+# Overlapping-copy (move) tests: wider offset window (q = 2) than the
+# plain copy tests, to exercise more source/destination overlaps.
+my $p = SVU_SIZE;
+$p = 8 if $p < 8;
+my $n = 3 * $p;
+my $q = 2;
+
+# Pure-Perl reference implementations used to build/check expected values.
+*myfill = *Scalar::Vec::Util::vfill_pp;
+*myeq = *Scalar::Vec::Util::veq_pp;
+
+# Clear the vector and truncate it back to an empty string
+# (not called in this test's loop).
+sub rst { myfill($_[0], 0, $n, 0); $_[0] = '' }
+
+# pat($vec, $a, $b, $x): start from an empty string; when $b is non-zero,
+# set bits [0,$a) to $x and bits [$a,$a+$b) to 1-$x.
+sub pat {
+ (undef, my $a, my $b, my $x) = @_;
+ $_[0] = '';
+ if ($b) {
+  myfill($_[0], 0, $a, $x);
+  myfill($_[0], $a, $b, 1 - $x);
+ }
+}
+
+my ($v, $c) = ('') x 2;
+
+# $v holds a run of 1s at [s1, s1+l); copying it onto [s2, s2+l) within
+# the SAME scalar must leave 1s at both ranges (built independently in
+# $c), even when the two ranges overlap.
+my @s = ($p - $q) .. ($p + $q);
+for my $s1 (@s) {
+ for my $s2 (@s) {
+  for my $l (0 .. $n - 1) {
+   last if $s1 + $l > $n or $s2 + $l > $n;
+   pat $v, $s1, $l, 0;
+   $c = '';
+   myfill($c, $s1, $l, 1);
+   myfill($c, $s2, $l, 1);
+   vcopy $v => $s1, $v => $s2, $l;
+   ok(myeq($v, 0, $c, 0, $n), "vcopy $s1, $s2, $l (move)");
+   is(length $v, length $c, "length is ok");
+  }
+ }
+}
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+# 17 doubling steps times 2 assertions, plus 2 for the fill case below.
+use Test::More tests => 34 + 2;
+use Config qw/%Config/;
+
+use Scalar::Vec::Util qw/vcopy/;
+
+my $n = 2 ** 16;
+
+# Pure-Perl reference implementations used to build/check expected values.
+*myfill = *Scalar::Vec::Util::vfill_pp;
+*myeq = *Scalar::Vec::Util::veq_pp;
+
+my ($v, $c) = ('') x 2;
+
+# Repeatedly duplicate the first $l bits of $v onto [$l, 2*$l), doubling
+# the populated region each step; after the copy, compare over the new
+# doubled length against the reference built with vfill_pp.
+my $l = 1;
+vec($v, 0, 1) = 1;
+vec($c, 0, 1) = 1;
+while ($l <= $n) {
+ myfill($c, $l, $l, 1);
+ vcopy $v, 0, $v, $l, $l;
+ $l *= 2;
+ ok(myeq($v, 0, $c, 0, $l), "vcopy $l");
+ is(length $v, length $c, "length is ok");
+}
+
+# Copy $n bits starting at offset $p, which runs past the end of the
+# written data: bits [p, n) land at [0, n-p) and the remainder reads as
+# zero-fill, matching the expected vector $k built below.
+my ($w, $k) = ('') x 2;
+$n = ($Config{alignbytes} - 1) * 8;
+my $p = 4 + $n / 2;
+vec($w, $_, 1) = 1 for 0 .. $n - 1;
+vec($k, $_, 1) = 0 for 0 .. $n - 1;
+vec($k, $_ - $p, 1) = 1 for $p .. $n - 1;
+vcopy $w, $p, $w, 0, $n;
+ok(myeq($w, 0, $k, 0, $n), "vcopy with fill");
+is(length $w, length $k, "length is ok");
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+use Test::More tests => 3;
+
+# Pass if $filename contains no line matching any of the given regexes;
+# on failure, diag() each violated description with its line numbers.
+sub not_in_file_ok {
+ my ($filename, %regex) = @_;
+ open( my $fh, '<', $filename )
+  or die "couldn't open $filename for reading: $!";
+
+ my %violated;
+
+ while (my $line = <$fh>) {
+  while (my ($desc, $regex) = each %regex) {
+   if ($line =~ $regex) {
+    push @{$violated{$desc}||=[]}, $.;
+   }
+  }
+ }
+
+ if (%violated) {
+  fail("$filename contains boilerplate text");
+  diag "$_ appears on lines @{$violated{$_}}" for keys %violated;
+ } else {
+  pass("$filename contains no boilerplate text");
+ }
+}
+
+# Check a module file for leftover Module::Starter stub text.
+sub module_boilerplate_ok {
+ my ($module) = @_;
+ not_in_file_ok($module =>
+  'the great new $MODULENAME' => qr/ - The great new /,
+  'boilerplate description' => qr/Quick summary of what the module/,
+  'stub function definition' => qr/function[12]/,
+ );
+}
+
+not_in_file_ok(README =>
+ "The README is used..." => qr/The README is used/,
+ "'version information here'" => qr/to provide version information/,
+);
+
+not_in_file_ok(Changes =>
+ "placeholder date/time" => qr(Date/time)
+);
+
+module_boilerplate_ok('lib/Scalar/Vec/Util.pm');
+
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+use Test::More;
+
+# Ensure a recent version of Test::Pod; skip the whole file when it is
+# not installed rather than failing.
+my $min_tp = 1.22;
+eval "use Test::Pod $min_tp";
+plan skip_all => "Test::Pod $min_tp required for testing POD" if $@;
+
+all_pod_files_ok();
--- /dev/null
+use strict;
+use warnings;
+use Test::More;
+
+# Ensure a recent version of Test::Pod::Coverage; skip when unavailable.
+my $min_tpc = 1.08;
+eval "use Test::Pod::Coverage $min_tpc";
+plan skip_all => "Test::Pod::Coverage $min_tpc required for testing POD coverage" if $@;
+
+# Test::Pod::Coverage doesn't require a minimum Pod::Coverage version,
+# but older versions don't recognize some common documentation styles
+my $min_pc = 0.18;
+eval "use Pod::Coverage $min_pc";
+plan skip_all => "Pod::Coverage $min_pc required for testing POD coverage" if $@;
+
+# The *_pp reference implementations are internal; exempt them from the
+# POD coverage requirement.
+all_pod_coverage_ok(
+ { also_private => [ qr/_pp$/ ] }
+);
--- /dev/null
+#!perl -T
+
+use strict;
+use warnings;
+
+use Test::More;
+
+# Check distribution filenames for cross-platform portability; skip the
+# whole file when the optional Test::Portability::Files is missing.
+eval "use Test::Portability::Files";
+plan skip_all => "Test::Portability::Files required for testing filenames portability" if $@;
+run_tests();
--- /dev/null
+#!perl
+
+use strict;
+use warnings;
+
+use Test::More;
+
+# Test::Kwalitee runs its checks from import(); skip when not installed.
+eval { require Test::Kwalitee; Test::Kwalitee->import() };
+plan( skip_all => 'Test::Kwalitee not installed; skipping' ) if $@;