From fa15ccb6c48c197a0dd1909d1d9836838d912528 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 25 Aug 2025 16:48:21 -0500 Subject: [PATCH 01/22] Switch to pgxn-tools based testing --- .github/workflows/ci.yml | 17 +++++++++++++++++ pg-travis-test.sh | 36 ------------------------------------ 2 files changed, 17 insertions(+), 36 deletions(-) create mode 100644 .github/workflows/ci.yml delete mode 100644 pg-travis-test.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..d4aaec9 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,17 @@ +name: CI +on: [push, pull_request] +jobs: + test: + strategy: + matrix: + pg: [17, 16, 15, 14, 13, 12, 11, 10] + name: 🐘 PostgreSQL ${{ matrix.pg }} + runs-on: ubuntu-latest + container: pgxn/pgxn-tools + steps: + - name: Start PostgreSQL ${{ matrix.pg }} + run: pg-start ${{ matrix.pg }} + - name: Check out the repo + uses: actions/checkout@v4 + - name: Test on PostgreSQL ${{ matrix.pg }} + run: pg-build-test diff --git a/pg-travis-test.sh b/pg-travis-test.sh deleted file mode 100644 index f63ae43..0000000 --- a/pg-travis-test.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Based on https://gist.github.com/petere/6023944 - -set -eux - -sudo apt-get update - -packages="python-setuptools postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-common" - -# bug: http://www.postgresql.org/message-id/20130508192711.GA9243@msgid.df7cb.de -sudo update-alternatives --remove-all postmaster.1.gz - -# stop all existing instances (because of https://github.com/travis-ci/travis-cookbooks/pull/221) -sudo service postgresql stop -# and make sure they don't come back -echo 'exit 0' | sudo tee /etc/init.d/postgresql -sudo chmod a+x /etc/init.d/postgresql - -sudo apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install $packages - -sudo easy_install pgxnclient - -PGPORT=55435 -PGCLUSTER_NAME=test - -export PGCLUSTER=9.6/$PGCLUSTER_NAME -env | 
grep PG -which pg_dump - -sudo pg_createcluster --start $PGVERSION $PGCLUSTER_NAME -p $PGPORT -- -A trust - -# TODO: have base.mk support dynamic sudo -sudo PGPORT=$PGPORT PGUSER=postgres PG_CONFIG=/usr/lib/postgresql/$PGVERSION/bin/pg_config make test - -[ ! -e test/regression.diffs ] From 99083b85d738c37639b860171783632bc01632cf Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 25 Aug 2025 16:56:03 -0500 Subject: [PATCH 02/22] Remove .travis.yml --- .travis.yml | 13 ------------- 1 file changed, 13 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 0c4e016..0000000 --- a/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: c -before_install: - - wget https://gist.github.com/petere/5893799/raw/apt.postgresql.org.sh - - sudo sh ./apt.postgresql.org.sh - - sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs 2>/dev/null)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/pgdg.list" -env: - - PGVERSION=9.6 - - PGVERSION=9.5 - - PGVERSION=9.4 - - PGVERSION=9.3 - - PGVERSION=9.2 - -script: bash ./pg-travis-test.sh From aa53b9118ab7d57b489259830f5f3872687ad244 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 25 Aug 2025 17:00:46 -0500 Subject: [PATCH 03/22] Remove errant vim swapfile --- sql/.object_reference.sql.swo | Bin 45056 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 sql/.object_reference.sql.swo diff --git a/sql/.object_reference.sql.swo b/sql/.object_reference.sql.swo deleted file mode 100644 index b3912fe9de65698c7165ac4804adbf23100b021e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 45056 zcmeI535+CHdB@9+W3Uaj6=Hz^iMN}z=3sZ$2S(1UHn=_0yW95c%(`dB2gWot-8D1i z?dfW|s&_mF0g)hy5JF(UCRXeqPzYcUB!D6l#2tkMf(b&H05%DP1Sb+0VG;xe<@dd# zs=BA=+FcxuD$ReYtKRj!@15WG-ur6dlItr+)s86*ip!>#_-pxN&8Y5g>iHEm!u?BOvOik5HK-@GPOunsf>u4)AHTG@?&TOSH%+>5 zEI3ebU^^V>CX4&7e5!Z(WtZxv{(HN3si*zU{B}YXehLm094I(YaG>Bo!GVGU1qTWa 
z6dd?9;Xu+p$9pAtIn(Cn8hbyZ@BZ8NdBonY?Q=iNK0n9aZ|HOXE&F`P-f!)5Ki59H z`U{*dAQI0Z2SC7d;hOKcl%h}3Jw$;C^%4X zpx{8kfr0}C2MP`p94I(YaG>D8uOtWjhUe`;_+1tO8R7XFn~Mi8@Vu{p*Me7rSAZ+P zd(QW~H-ipnfjvNhXM<;fi@*ioU!UrEe+xbc-Vg2r?*V@aUILy5KKB&QdoOqw_#@%t^!wr{oqlQ2>$`T1HK484?Y7v3GN0Ba4GmXy!tc{p1lDC;2JOn&Ib>|&-=mM z;3{xFcnk%~!{966i{Kt`1UwD=80E<~z(>Jrz!EqDzK7D}qu}k}b>M|y7<>jL$i3ha z@L3cbaZryG*Tj+X#I?h!y;Q6FiQkNt#`w7uBsII&!f!M>K^za;$7HP?jE^V5ZOL9` zU$y*|AWonB&Qd(0Dzj>OW=>7dPfe*2Ptw`9PtsT)v04qAO_eNnqE+R$RM6=}9ThI} zdo5WGTT3d8)oLedE$vltgukFCZX8#lIBBk_b`-XfCX|xeUNtc@Jv&z|Ri@`0l{K4@ zTI{#NWNjl~U6*g2tD1X~T&el#%60SQVHcyYBF*-CCg^;p?m7Y`e7pG;VMp)fc;yN0 zdA9B-L=-k8T5dX?POu~-$}2AW2DQS12deG>0AP2hAC+8fsgN~n2E3~tF$}*x^-}XsVf@e<~@Bg0C9#B)e zH63Tz(ACf0;_d>)>#Csjpesw*&^O~j8e8sBCrrcWCBQ!A=uozmntNIYiyHO+SEJG* zvpuvea@YM$R%Yia(-U**P<7@gZ7r!KQPhmbZ10hFcQ#nQ{~oc0ovV7T!mV`Uq_*NG z_2snd>yOEk3g(D0MA~YSQtW=@Dp*>jY?ri-VEaB7#OB0wYD5_aozQQ(xU>7QYUOCD zdXu`ge3Lg(EtlrXYOZwkR9U&sJKZj5nKSGu8ac+Jv7rr&MlJ#{${R) zh{~ZH3-kzHnO4HCj4^8b;DFlZ*w$0c4tQ6W4_74Hx!^X-2V_Dp3)dLx8Pw_fp~08_ zbkq)jDr_)hgo_~qjAplg!VjDNLNo9t%M(+jYS}BzDpQQ!Ya_5@z8tLh%5O3*H`eU%+)yV%U)jb}zHzQRJqx`|)O#99`Ns0ZJgo~x zNToI{VzPYo{9!dD%sHqors``~Q}7u#Pe>QBj+Q}GZ!)oLbfUJd4UiDDVx|VWRK<8w(W2^v z@o|}q(l)7pD@=Foye8^)RJ7WXcxI#?uZA%LKeJRl;px&}O;%@)sfp6;L}{`tQBenl z9<{n0xcV1MdMao&nCdbcmdNM_%j2rV)Y$nq1B@KmJ(6m4+s%;58(8is+fUi5P@SP0?XO|BHbLUl z49(lM^2r&sXHP!C-ujViob_eDwFKo!q$^Cl+hH?zQj3Emfycu`xdu5jA~nr)p%7dx zE6PVV&4X&F9(4)U>NcCUSJOiw{7TD^u*b^PLo?N*^bSeQbc($RP?DRbuU`IgMXer^ zyG=Wm$>#VtZ8x24I*};pbb}G;aO4LKLT_(pw%ZheqlY0OQiu#^xy;%Iv^hP!>?S4M zfnPl<9oj-r_q)`;9Ao5lClE3c={mI#g)=jEst_F@VwPk$ zzYhK$yaU_={uI0w#GnRV432?G@CV@Q$n#$V9|L~|UI`9@=YnT}Cj$@s1bO~Z@O|(v zU=(~ES^gv79pEl-CwLv`f*N=n`Ta5Q5cm*y7x;7VCU6Is0!M(z`!4|Jf(Mc19{?W* z_kq6vZv)qXGr@225`DNf3 zZZqHv;DHa|_I~hca1OX1KX-%Mz*XRE@Hle)W8e$m5uP6cN?-HGCS_iENDCbkjz)8%EjA{D%$&%oH=cs5jEge~UCYE2pEFdU0$dAjc z`8k-K*-T0FL5-j-<4YrDrS55o%f6T`Pn8ixnhbhP^;~u7Ui$X7D-lz+qja><%8K7= z)DnMbFMUr>H!a*nxTSbXQ&VX&N^McLOo!-^hMhd3C*JGiVToIrDPeE6^cY>UN{JfT 
zVT8~m#>1|9+nilGJ}y!;6BeCETWg7R6g>v>0iyzO0~?uCt5FBtg1-=Tse<(XjLcCS zjL}^)V6SRj3SN$@1sw@RA_tpvt5JTZ<1;fu8)o^=kTxpY! zPQU=ACpn#{yRfstPS^zRaUdGC~Z{t7K=<9qxP;L)48c5qQz#qt0#I^ z?b?!dMQdx*FOLnS%8IF(nPX;%Ms?xFYBTXyTz!-OZYz9gH>jNmqoyy)P&fG(uT~+_ zv7NY!%iZa$rQ>z7fylu0@1kF@y>Gq~&ZYsWdP?_V1Kx|8+>`X>BHuF-y^ASpg zOI2y{<7h{GTlws&5rc#iE=7{E%Ve97ze$~N9h1{|2tPxq6U`=b4BeJ9>hgV-$-stH zQin8!9SZ56h?hkep3jyg%C z;T?5L8%9TLu7fd4iK2NE?UZOgT}{)TQqE=m^khf0q=Q*+W_r2=q_DDbF|(wBeNL(- zv=mE!JEB#n*P|4toYESrsbBLBY`S$Hopw8;N% z|NhUA_rDGP5xf_~;2GdM$o~HZz6nl%bHN?R_6ppG>|O?w-~w<4_#v|USHWH2^`Hr! z2lj&Jf*+IaC&25$OMuXEAvhC=Jb#q?v%xdK+2Dt~6WRXd-~f0IcnXkp`@aIkRdAr- zK*5260|f^P4*XJcKqL!+Y+q|NM8*-l94l+-JM$~&PybvW;!tURYEE6k6fB)rGAXmW zr(~XGBI-?MeV_rp2D~cg1NPINce4|`UP{mm>Q!cDi)gyGpOu~X)v{V7Asiq@>xShy zLQSdJWZs0EJ=yXg9&_cwZ8X_THUAgS-E^$%+1M$K%>smt%(oS{Hui5lxDdH{^u}Rw zo=mu{wLLb7C24cY8rm$W)MGBa^qt$4Wpr(}o@|_`+sm_6n$BM?N%v}p zDQ5eGwn{VenM*Z$xx;kLlKWO^8eh5eY_OdbrLL1(CM8PBd`j}%n<){?jdny-C9-yd z=xpmIa<;WSXVhpcOsvULI@=43V(Lm+{XZ0T*raiV)wCgoXIq)HnngD=S@21fal>Y} zY$Hx^?3Cf3 zkoDgQUIt3wam%t~%hrv6+z2L1N02cz;8}JEm4HyF-Mi=lFa3PTO{ZE4t za2}9#{1x!u$nkFkU2rq_F7o`BfUL>4z`5Yxq4NX~dd~r8f#NDSP;j8&K*53EG!C%h z&$dvmiPP}QMOB=l%Ad9RwK+vaCCN^bSpmbH&T&WQ@^XhPBr=I|($QvH<=uI^;q(-M zIhVnz_)ZUQQJa}`{(_EZ^|K(Ac?R$yb<-2-lVwI4%TSIZ=}UbeD}6Ezxu=YrgfxIY z=X6Tsxk&QVxp(v`l%9K%{q|hFRY)&h$`0ZI^rdd6L|__=P$)HHz@a{&OxLv3v!*19F);o>DfmX!0eIy_J&4o(2$Ugv|R+4TCDQd)l6j)%H=|z zMkuQt={pJNb{%-)G1v9zFtzM}pB=vNrnM2o=PsdW(3PIe6Z!uJBo!2vjc z^mQT}=Lk`ovlX0!Q}OD-%Ty8{yo_zha(X{+VqiO-#m~O}D@!AbBKzf1wr{wZ@DfVD z%N(|%)%x5dJE@QKht9b?^}gAH7qWt#u7@9xmEoKn2hF)`PssYM6~>B#wb3vs_80Po z8qAiFSQ{~rc-_IDY{h$hjO))@$-?=*ecIFkr%vTu%di?MP05nC-uxmb`!%8FNiX}u z0y%})2o}0aSaVt;3YO4wr$cL;g>?($S3pp3P$e4Ev>Ur0`fMT8~clEpGoO*xudvum# z2E6Yvs35V3ACawkVjK`q?*tg|e!~cR^PAKhJD7O@xp2WAz&Xyjqw6+~T1IQ(3Cm+(-+nB_H69&RIKWk!6u-MXMk@#S0M8oko z)aF&31qN#yvJo@`ZPUes#OR+;yMq*W=>0ShC`>ldh~%!VU7Zfw*uCOaIpXgGL-24h0o_-jWPvlKIfoMGUQ0w%n% z%q_M+d~B+R+CG$;n3Drn1Uc$VOqsZ`G^wpXY2)4t0mdr*#YLKN(9rw;{~G!Hd}MZ! 
z|M}C`UC8_Uft>fh8{7%*0IvnNfKjj;{ETpq09nUB3Z4lhuD22AwLokE><6#m-2%87 zTn~Bo!GVGUzjPchAJWKdQyPPp`s*4~gWa6q z$f{+J3^c9{#;3{`kx>j;tlg0%!e6{eRlP{ww?ZxTL9XFm zGtVburqP`LcP2ZG{z};G4EGUo+ z-Bwx$G;OCj9UZ5Z|F1^Qmp%O={9|TbUk}@NpXL5~@FFk{o&~;u+3$noC* zZv<}u4e(QB`k#QW0NJyDKlmUhu7U#v2MP`p94I(YaG>Bo!GVGU1qXhkIACO`%|2s= zXk{fWCk8qzZYFH@zdsfmx4muhrL1%_M}G{?CY%+=JQLMmItHxY587+79co;LjbCfJ zZ9^l5+oS4q#-(?2>?lI*`3c~zlfx}fNcJ=(YI3__WLr3+N6380FZr&iRW29{u%}6F zU{o1{zgoIAFJ%A!A~O7YfXM%D8~#s`^WO*l82ll46%bp1W8h)@e!zygh`XHK7uo;$ zK-T}y17Ahfe>b=XG{NWD*fhxuHm&|=~{)(PV73g4rY4?$zmpU*S5BE?rc<#U4M-b(p=qM1zw`L|4hW4<4tgPTt*%LktMy0qT0Z@(96vs3jYRdI$toi+P`U5*6Q pyL^Jw*I%jD-yP;t(dyPNCJ5zA1Zts!^FETO<8wKvAJ-{+{|80Vb!Pwo From 68e8afc25c70301e286eb6652f1529c5129eefbe Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 25 Aug 2025 17:15:46 -0500 Subject: [PATCH 04/22] Update test workflow --- .github/workflows/ci.yml | 4 +++- .gitignore | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d4aaec9..ce8882f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,4 +14,6 @@ jobs: - name: Check out the repo uses: actions/checkout@v4 - name: Test on PostgreSQL ${{ matrix.pg }} - run: pg-build-test + run: make test + - name: Test pg_dump + run: make dump_test diff --git a/.gitignore b/.gitignore index facf651..8e63124 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,4 @@ regression.out # Misc tmp/ .DS_Store +.claude/settings.local.json From 9859566d2b12a5340b9b00be8819ce5f2dddebaa Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 25 Aug 2025 17:26:46 -0500 Subject: [PATCH 05/22] Add missing PGUSER argument --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ce8882f..cea4aa8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,6 +14,6 @@ jobs: - name: Check out the repo uses: 
actions/checkout@v4 - name: Test on PostgreSQL ${{ matrix.pg }} - run: make test + run: make test PGUSER=postgres - name: Test pg_dump - run: make dump_test + run: make dump_test PGUSER=postgres From 9a1e50bd56c106bb73458f21fb200b485f36ace6 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 25 Aug 2025 17:29:31 -0500 Subject: [PATCH 06/22] Fix dump test --- test/dump/run.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/dump/run.sh b/test/dump/run.sh index 4d0c7a3..6896f68 100755 --- a/test/dump/run.sh +++ b/test/dump/run.sh @@ -26,7 +26,7 @@ if [ "$1" == "-f" ]; then fi echo Creating dump database -createdb test_dump && psql -f test/dump/load_all.sql test_dump > $create_log || die 3 "Unable to create dump database" +createdb test_dump && psql -Xf test/dump/load_all.sql test_dump > $create_log || die 3 "Unable to create dump database" # Ensure no errors in log check_log() { @@ -45,8 +45,8 @@ check_log $create_log creation echo Running dump and restore # No real need to cat the log on failure here; psql will generate an error and even if not verify will almost certainly catch it -createdb test_load && PAGER='' psql -c '\df pg_get_object_address' test_load || die 5 'crap' -(echo 'BEGIN;' && pg_dump test_dump && echo 'COMMIT;') | psql -q -v VERBOSITY=verbose -v ON_ERROR_STOP=true test_load > $restore_log +createdb test_load && PAGER='' psql -Xc '\df pg_get_object_address' test_load || die 5 'crap' +(echo 'BEGIN;' && pg_dump test_dump && echo 'COMMIT;') | psql -q -X -v VERBOSITY=verbose -v ON_ERROR_STOP=true test_load > $restore_log rc=$? 
if [ $rc -ne 0 ]; then cat $restore_log @@ -54,7 +54,7 @@ if [ $rc -ne 0 ]; then fi echo Verifying restore -psql -f test/dump/verify.sql test_load > $verify_log || die 5 "Test failed" +psql -Xf test/dump/verify.sql test_load > $verify_log || die 5 "Test failed" check_log $create_log verify From 594d248ce1ae0822e8940fc5bc0861005ab016d2 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 25 Aug 2025 17:35:03 -0500 Subject: [PATCH 07/22] Squashed 'pgxntool/' changes from 890053c..e9c24de e9c24de Fix pg_regress on versions > 12 (#5) c0af00f Improvements to HISTORY.asc 6e8f2a7 Allow use of sudo when installing an extension 705f1ec Don't run clean as part of make test 370fa8e Create test/sql during setup git-subtree-dir: pgxntool git-subtree-split: e9c24de986ddc85bbd1fb3149076888d075ce100 --- HISTORY.asc | 13 ++++++++++--- base.mk | 14 ++++++++++---- setup.sh | 1 + 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/HISTORY.asc b/HISTORY.asc index b69c9d1..9cb793b 100644 --- a/HISTORY.asc +++ b/HISTORY.asc @@ -1,5 +1,11 @@ STABLE ------ +== Support 13+ +The `--load-language` option was removed from `pg_regress` in 13. + +== Reduce verbosity from test setup +As part of this change, you will want to review the changes to test/deps.sql. + === Support asciidoc documentation targets By default, if asciidoctor or asciidoc exists on the system, any files in doc/ that end in .adoc or .asciidoc will be processed to html. See the README for full details. @@ -12,11 +18,12 @@ If a test input file changes we certainly need to re-run tests. === Have test/pgxntool/setup.sql install tap before running deps.sql -=== Reduce verbosity from test setup -As part of this change, you will want to review the changes to test/deps.sql. 
- === Support other asciidoc extensions +=== Create the test/sql/ directory during setup + +=== Use `--sudo` option when installing pgtap + 0.2.0 ----- ### Stop using $(VERSION) diff --git a/base.mk b/base.mk index ce918db..a976ebb 100644 --- a/base.mk +++ b/base.mk @@ -36,7 +36,7 @@ TEST_SQL_FILES += $(wildcard $(TESTDIR)/sql/*.sql) TEST_RESULT_FILES = $(patsubst $(TESTDIR)/sql/%.sql,$(TESTDIR)/expected/%.out,$(TEST_SQL_FILES)) TEST_FILES = $(TEST_SOURCE_FILES) $(TEST_SQL_FILES) REGRESS = $(sort $(notdir $(subst .source,,$(TEST_FILES:.sql=)))) # Sort is to get unique list -REGRESS_OPTS = --inputdir=$(TESTDIR) --outputdir=$(TESTOUT) --load-language=plpgsql +REGRESS_OPTS = --inputdir=$(TESTDIR) --outputdir=$(TESTOUT) # See additional setup below MODULES = $(patsubst %.c,%,$(wildcard src/*.c)) ifeq ($(strip $(MODULES)),) MODULES =# Set to NUL so PGXS doesn't puke @@ -57,8 +57,10 @@ GE91 = $(call test, $(MAJORVER), -ge, 91) ifeq ($(GE91),yes) all: $(EXTENSION_VERSION_FILES) +endif -#DATA = $(wildcard sql/*--*.sql) +ifeq ($($call test, $(MAJORVER), -lt 13), yes) + REGRESS_OPTS += --load-language=plpgsql endif PGXS := $(shell $(PG_CONFIG) --pgxs) @@ -77,8 +79,12 @@ installcheck: $(TEST_RESULT_FILES) $(TEST_OUT_FILES) $(TEST_SQL_FILES) $(TEST_SO # make test: run any test dependencies, then do a `make install installcheck`. # If regressions are found, it will output them. +# +# This used to depend on clean as well, but that causes problems with +# watch-make if you're generating intermediate files. If tests end up needing +# clean it's an indication of a missing dependency anyway. 
.PHONY: test -test: clean testdeps install installcheck +test: testdeps install installcheck @if [ -r $(TESTOUT)/regression.diffs ]; then cat $(TESTOUT)/regression.diffs; fi # make results: runs `make test` and copy all result files to expected @@ -220,6 +226,6 @@ installcheck: pgtap pgtap: $(DESTDIR)$(datadir)/extension/pgtap.control $(DESTDIR)$(datadir)/extension/pgtap.control: - pgxn install pgtap + pgxn install pgtap --sudo endif # fndef PGXNTOOL_NO_PGXS_INCLUDE diff --git a/setup.sh b/setup.sh index 3730a2b..881ccaa 100755 --- a/setup.sh +++ b/setup.sh @@ -46,6 +46,7 @@ git add META.json mkdir -p sql test src cd test +mkdir -p sql safecp ../pgxntool/test/deps.sql deps.sql [ -d pgxntool ] || ln -s ../pgxntool/test/pgxntool . git add pgxntool From 8dc5cfcb90896f6269a25aa35e902311fc688116 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 25 Aug 2025 17:36:24 -0500 Subject: [PATCH 08/22] dump test is actually included in make test --- .github/workflows/ci.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cea4aa8..3183031 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,5 +15,3 @@ jobs: uses: actions/checkout@v4 - name: Test on PostgreSQL ${{ matrix.pg }} run: make test PGUSER=postgres - - name: Test pg_dump - run: make dump_test PGUSER=postgres From c0325029502d22e29c61f27f52731ca838ecdfaf Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Mon, 25 Aug 2025 18:29:18 -0500 Subject: [PATCH 09/22] Stop using reg* pseudotypes in tables Use of those types is not supported by pg_upgrade --- sql/object_reference.sql | 80 +++---------------------------------- test/expected/base.out | 2 +- test/expected/zzz_build.out | 6 +-- test/sql/base.sql | 6 +-- 4 files changed, 12 insertions(+), 82 deletions(-) diff --git a/sql/object_reference.sql b/sql/object_reference.sql index 44dc1c6..fafb79a 100644 --- a/sql/object_reference.sql +++ b/sql/object_reference.sql @@ -192,7 +192,7 @@ GRANT 
REFERENCES ON _object_reference.object TO object_reference__dependency; CREATE TABLE _object_reference._object_oid( object_id int PRIMARY KEY REFERENCES _object_reference.object ON DELETE CASCADE ON UPDATE CASCADE - , classid regclass NOT NULL + , classid oid NOT NULL /* TODO: needs to be a trigger CONSTRAINT classid_must_match__object__address_classid CHECK( classid IS NOT DISTINCT FROM cat_tools.object__address_classid(object_type) ) @@ -200,50 +200,11 @@ CREATE TABLE _object_reference._object_oid( , objid oid NOT NULL , objsubid int NOT NULL CONSTRAINT objid_must_match CHECK( -- _object_reference._sanity() depends on this! - objid IS NOT DISTINCT FROM coalesce( - regclass::oid -- Need to cast first item to generic OID - , regconfig - , regdictionary - , regnamespace -- SED: REQUIRES 9.5! - , regoperator - , regprocedure - , regtype - , object_oid - ) + objid IS NOT DISTINCT FROM object_oid ) , CONSTRAINT object__u_classid__objid__objsubid UNIQUE( classid, objid, objsubid ) - , regclass regclass - CONSTRAINT regclass_classid CHECK( regclass IS NULL OR classid = cat_tools.object__reg_type_catalog('regclass') ) - , regconfig regconfig - CONSTRAINT regconfig_classid CHECK( regconfig IS NULL OR classid = cat_tools.object__reg_type_catalog('regconfig') ) - , regdictionary regdictionary - CONSTRAINT regdictionary_classid CHECK( regdictionary IS NULL OR classid = cat_tools.object__reg_type_catalog('regdictionary') ) - , regnamespace regnamespace -- SED: REQUIRES 9.5! - CONSTRAINT regnamespace_classid CHECK( regnamespace IS NULL OR classid = cat_tools.object__reg_type_catalog('regnamespace') ) -- SED: REQUIRES 9.5! 
- , regoperator regoperator - CONSTRAINT regoperator_classid CHECK( regoperator IS NULL OR classid = cat_tools.object__reg_type_catalog('regoperator') ) - , regprocedure regprocedure - CONSTRAINT regprocedure_classid CHECK( regprocedure IS NULL OR classid = cat_tools.object__reg_type_catalog('regprocedure') ) - -- I don't think we should ever have regrole since we can't create event triggers on it --- , regrole regrole - , regtype regtype - CONSTRAINT regtype_classid CHECK( regtype IS NULL OR classid = cat_tools.object__reg_type_catalog('regtype') ) - , object_oid oid + , object_oid oid NOT NULL ); -CREATE TRIGGER null_count - AFTER INSERT OR UPDATE - ON _object_reference._object_oid - FOR EACH ROW EXECUTE PROCEDURE not_null_count_trigger( - 5 -- First 4 fields, + 1 - , 'only one object reference field may be set' - ) -; -CREATE UNIQUE INDEX _object_oid__u_regclass ON _object_reference._object_oid(regclass) WHERE regclass IS NOT NULL; -CREATE UNIQUE INDEX _object_oid__u_regconfig ON _object_reference._object_oid(regconfig) WHERE regconfig IS NOT NULL; -CREATE UNIQUE INDEX _object_oid__u_regdictionary ON _object_reference._object_oid(regdictionary) WHERE regdictionary IS NOT NULL; -CREATE UNIQUE INDEX _object_oid__u_regoperator ON _object_reference._object_oid(regoperator) WHERE regoperator IS NOT NULL; -CREATE UNIQUE INDEX _object_oid__u_regprocedure ON _object_reference._object_oid(regprocedure) WHERE regprocedure IS NOT NULL; -CREATE UNIQUE INDEX _object_oid__u_regtype ON _object_reference._object_oid(regtype) WHERE regtype IS NOT NULL; SELECT __object_reference.create_function( '_object_reference._sanity' @@ -303,13 +264,6 @@ CREATE VIEW _object_reference._object_v AS , i.classid , i.objid , i.objsubid - , i.regclass - , i.regconfig - , i.regdictionary - , i.regnamespace - , i.regoperator - , i.regprocedure - , i.regtype , i.object_oid , s.* FROM _object_reference.object o @@ -325,13 +279,6 @@ CREATE VIEW _object_reference._object_v__for_update AS , i.classid , 
i.objid , i.objsubid - , i.regclass - , i.regconfig - , i.regdictionary - , i.regnamespace - , i.regoperator - , i.regprocedure - , i.regtype , i.object_oid , s.* FROM _object_reference.object o @@ -363,26 +310,9 @@ BEGIN WHERE o.object_id = _object_oid__add.object_id ; END IF; - DECLARE - c_reg_type name := cat_tools.object__reg_type(object_type); -- Verifies regtype is supported, if there is one - c_oid_field CONSTANT name := coalesce(c_reg_type, 'object_oid'); - - c_oid_insert CONSTANT text := format( - --USING object_id, classid, objid, objsubid - $$INSERT INTO _object_reference._object_oid(object_id, classid, objid, objsubid, %I) - SELECT $1, $2, $3, $4, $3::%I$$ - , c_oid_field - , coalesce(c_reg_type, 'oid') - ) - ; BEGIN - RAISE DEBUG E'%\n USING %, %, %, %' - , c_oid_insert - , object_id, classid, objid, objsubid - ; - EXECUTE c_oid_insert - USING object_id, classid, objid, objsubid - ; + INSERT INTO _object_reference._object_oid(object_id, classid, objid, objsubid, object_oid) + VALUES (object_id, classid, objid, objsubid, objid); SELECT INTO STRICT r_object_v -- Record better exist! 
* diff --git a/test/expected/base.out b/test/expected/base.out index 5e39f56..8579c90 100644 --- a/test/expected/base.out +++ b/test/expected/base.out @@ -3,7 +3,7 @@ ok 1 - Role object_reference__dependency should be granted USAGE on schema _object_reference ok 2 - Role object_reference__dependency should be granted REFERENCES on table _object_reference.object ok 3 - CREATE TEMP TABLE test_object AS SELECT object_reference.object__getsert('table', 'test_table') AS object_id; -ok 4 - Verify regclass field is correct +ok 4 - Verify object_oid field is correct ok 5 - Existing object works, provides correct ID ok 6 - secondary may not be specified for table objects ok 7 - Verify count_nulls extension can not be relocated diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index fe92660..f3cfa9a 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -11,7 +11,7 @@ psql:test/temp_load.not_sql:189: WARNING: I promise you will be sorry if you tr -psql:test/temp_load.not_sql:513: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! +psql:test/temp_load.not_sql:443: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! @@ -21,9 +21,9 @@ psql:test/temp_load.not_sql:513: WARNING: I promise you will be sorry if you tr -psql:test/temp_load.not_sql:620: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! +psql:test/temp_load.not_sql:550: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! -psql:test/temp_load.not_sql:627: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! +psql:test/temp_load.not_sql:557: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! 
diff --git a/test/sql/base.sql b/test/sql/base.sql index 202ae5f..5b5ff27 100644 --- a/test/sql/base.sql +++ b/test/sql/base.sql @@ -32,9 +32,9 @@ SELECT lives_ok( , $$CREATE TEMP TABLE test_object AS SELECT object_reference.object__getsert('table', 'test_table') AS object_id;$$ ); SELECT is( - (SELECT regclass FROM _object_reference._object_v WHERE object_id = (SELECT object_id FROM test_object)) - , 'test_table'::regclass - , 'Verify regclass field is correct' + (SELECT object_oid FROM _object_reference._object_v WHERE object_id = (SELECT object_id FROM test_object)) + , 'test_table'::regclass::oid + , 'Verify object_oid field is correct' ); SELECT is( object_reference.object__getsert('table', 'test_table') From 7236cf3f6f62bffa74a166ef997f84b75f9ac8ad Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 26 Aug 2025 15:59:38 -0500 Subject: [PATCH 10/22] Add README; get rid of no longer needed code --- README.md | 313 +++++++++++++++++++++++++++++++++++++++ sql/object_reference.sql | 29 ---- 2 files changed, 313 insertions(+), 29 deletions(-) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 0000000..c20af2c --- /dev/null +++ b/README.md @@ -0,0 +1,313 @@ +# Object Reference Framework + +This framework provides tracking and management of references to database objects. It's designed to maintain referential integrity for objects that may be created, dropped, or renamed, and provides facilities for automatically capturing newly created objects into organized groups. + +Key capabilities: +- Track references to database objects that may be created, dropped, or renamed +- Group related objects for organization and DDL capture +- Automatically capture DDL operations to track new objects +- Manage dependencies between objects and external tables +- Support for object lifecycle management + +# A word on documentation... 
+ +Good documentation should be like good code comments - explain things concisely without being overly verbose. Towards that end, this doc does *not* provide definition for things that should be inherently obvious, other than mentioning their existence. For example, we never define what is meant by `object_type`. The name itself should provide enough information. + +# Installation + +This extension depends on the `cat_tools` extension. + +```sql +CREATE EXTENSION object_reference CASCADE; +``` + +The extension creates two schemas: +- `object_reference` - Contains the public API functions +- `_object_reference` - Contains internal implementation details (do not use directly) + +To grant users access to the extension: +```sql +GRANT object_reference__usage TO role1, role2, role3; +``` + +# Security + +There are two roles associated with the extension: + +- `object_reference__usage` - Allows using the extension's public API functions. Grant this to users who need to track and manage object references. +- `object_reference__dependency` - Special role for creating foreign key dependencies to the internal object table. Only grant this to schemas/applications that need to create referential integrity constraints against the object tracking system. + +Most users will only need `object_reference__usage`. The `object_reference__dependency` role is only needed when using `object__dependency__add()` or `object_group__dependency__add()` functions. + +# Key Concepts + +## Objects vs OIDs + +The framework separates object metadata (names, types, arguments) from their actual database OIDs. This allows tracking objects that don't exist yet, or that may be recreated. OID resolution is performed lazily - only when actually needed. + +## Object Groups + +Objects can be organized into named groups for logical organization. This is particularly useful for tracking all objects created during a specific operation or time period, especially when combined with DDL capture. 
+ +## DDL Capture + +The framework can automatically capture newly created objects during DDL operations and add them to a specified object group. This is implemented using PostgreSQL event triggers. + +# API + +Note that all API routines live in the `object_reference` schema. Objects in the `_object_reference` schema are considered internal-only and should not be accessed directly. + +Most routines work with the `cat_tools.object_type` enum for specifying object types. You can also pass object types as text strings which will be converted automatically. + +## Core Object Functions + +### `object__getsert(...) RETURNS int` + +```sql +object__getsert( + object_type text | cat_tools.object_type + , object_name text + , secondary text DEFAULT NULL + , object_group_name text DEFAULT NULL + , loose boolean DEFAULT false +) RETURNS int +``` + +Get or insert an object reference, returning the `object_id`. This is the primary function for tracking objects. + +Arguments: +- `object_type` - Type of object (table, function, index, etc.) +- `object_name` - Fully qualified name of the object +- `secondary` - Additional identifier for objects that need it (e.g., function arguments) +- `object_group_name` - Optional object group to add this object to +- `loose` - If true, allows creating references to objects that don't exist + +### `object__getsert_w_group_id(...) RETURNS int` + +```sql +object__getsert_w_group_id( + object_type cat_tools.object_type + , object_name text + , secondary text DEFAULT NULL + , object_group_id int DEFAULT NULL + , loose boolean DEFAULT false +) RETURNS int +``` + +Same as `object__getsert()` but accepts a numeric `object_group_id` instead of group name. + + +## Object Group Functions + +### `object_group__create(...) RETURNS int` + +```sql +object_group__create( + object_group_name text +) RETURNS int +``` + +Create a new object group and return its ID. + +### `object_group__get(...) 
RETURNS object_group` + +```sql +object_group__get( + object_group_name text | object_group_id int +) RETURNS _object_reference.object_group +``` + +Retrieve an object group by name or ID. Throws an error if the group doesn't exist. + +### `object_group__remove(...) RETURNS void` + +```sql +object_group__remove( + object_group_name text | object_group_id int + , force boolean DEFAULT false +) RETURNS void +``` + +Remove an object group. This does not delete the objects themselves, only the grouping. + +### `object_group__object__add(...) RETURNS void` + +```sql +object_group__object__add( + object_group_id int + , object_id int +) RETURNS void +``` + +Add an existing object to an object group. + +### `object_group__object__remove(...) RETURNS void` + +```sql +object_group__object__remove( + object_group_id int + , object_id int +) RETURNS void +``` + +Remove an object from an object group. + +## Dependency Functions + +These functions create foreign key dependencies to the object tracking system. They require the `object_reference__dependency` role. + +### `object__dependency__add(...) RETURNS void` + +```sql +object__dependency__add( + table_name text + , field_name name +) RETURNS void +``` + +Create a foreign key dependency from the specified table to the object tracking system. + +Arguments: +- `table_name` - Name of table to add dependency to +- `field_name` - Name of the field to create the foreign key on + +### `object_group__dependency__add(...) RETURNS void` + +```sql +object_group__dependency__add( + table_name text + , field_name name +) RETURNS void +``` + +Create a foreign key dependency from the specified table to the object group system. + +## DDL Capture Functions + +DDL capture allows you to automatically track objects created during DDL operations. + +### `capture__start(...) RETURNS int` + +```sql +capture__start( + object_group_name text | object_group_id int +) RETURNS int +``` + +Begin capturing newly created objects to the specified group. 
The group must +already exist. Returns the capture level (for nested captures). + +### `capture__stop(...) RETURNS void` + +```sql +capture__stop( + object_group_name text | object_group_id int +) RETURNS void +``` + +Stop capturing objects to the specified group. + +### `capture__get_current(...) RETURNS record` + +```sql +capture__get_current( + OUT capture_level int + , OUT object_group_id int +) RETURNS record +``` + +Get information about the current capture state. + +### `capture__get_all(...) RETURNS SETOF record` + +```sql +capture__get_all( + OUT capture_level int + , OUT object_group_id int +) RETURNS SETOF record +``` + +Get information about all active capture levels. + +## Utility Functions + +### `post_restore() RETURNS void` + +```sql +post_restore() RETURNS void +``` + +Ensures all object references are correct after a database restore. Run this after restoring from backup to fix any OID mismatches. + +### Object Type Information Functions + +**Get lists of unsupported/untested object types:** + +```sql +unsupported() RETURNS cat_tools.object_type[] +unsupported_srf() RETURNS SETOF cat_tools.object_type + +untested() RETURNS cat_tools.object_type[] +untested_srf() RETURNS SETOF cat_tools.object_type +``` + +**Check if specific object types are supported/tested:** + +```sql +unsupported(object_type text | cat_tools.object_type) RETURNS boolean +untested(object_type text | cat_tools.object_type) RETURNS boolean +``` + +These functions help determine which object types are supported by the framework. Unsupported types cannot be tracked, while untested types may work but haven't been fully validated. + +# Event Triggers + +The extension automatically installs several event triggers that: + +- Capture object creation when DDL capture is active +- Update object identity information when objects are renamed +- Clean up object references when objects are dropped + +These event triggers operate transparently and require no user intervention. 
However, be aware that they may add slight overhead to DDL operations. + +# Examples + +## Basic Object Tracking + +```sql +-- Track a table +SELECT object_reference.object__getsert('table', 'public.my_table'); + +-- Track a function with its signature +SELECT object_reference.object__getsert('function', 'public.my_func', 'integer, text'); +``` + +## Using Object Groups + +```sql +-- Create a group for related objects +SELECT object_reference.object_group__create('my_feature_objects'); + +-- Add objects to the group +SELECT object_reference.object__getsert('table', 'public.feature_table', NULL, 'my_feature_objects'); +SELECT object_reference.object__getsert('view', 'public.feature_view', NULL, 'my_feature_objects'); +``` + +## DDL Capture + +```sql +-- Create a group first +SELECT object_reference.object_group__create('migration_v2_objects'); + +-- Start capturing new objects to that group +SELECT object_reference.capture__start('migration_v2_objects'); + +-- Run your DDL commands +CREATE TABLE public.new_table (id int, name text); +CREATE INDEX idx_new_table_name ON public.new_table (name); + +-- Stop capturing +SELECT object_reference.capture__stop('migration_v2_objects'); + +-- All objects created between start/stop are now tracked in the 'migration_v2_objects' group +``` \ No newline at end of file diff --git a/sql/object_reference.sql b/sql/object_reference.sql index fafb79a..9b204e1 100644 --- a/sql/object_reference.sql +++ b/sql/object_reference.sql @@ -3,35 +3,6 @@ SET LOCAL client_min_messages = WARNING; \echo You really, REALLY do NOT want to try and load this via psql!!! \echo It will FAIL during pg_dump! 
\quit --- This BS is because count_nulls is relocatable, so could be in any schema -DO $$ -BEGIN - RAISE DEBUG 'initial search_path = %', current_setting('search_path'); - PERFORM set_config('search_path', current_setting('search_path') || ', ' || extnamespace::regnamespace::text, true) -- true = local only - FROM pg_extension - WHERE extname = 'count_nulls' - ; - RAISE DEBUG 'search_path changed to %', current_setting('search_path'); -END -$$; -/* -DO $$ -DECLARE - c_schema CONSTANT name := (SELECT extnamespace::regnamespace::text FROM pg_extension WHERE extname = 'cat_tools'); -BEGIN - IF c_schema IS NULL THEN - RAISE 'extension cat_tools is not installed'; - END IF; - - IF c_schema <> 'cat_tools' THEN - RAISE 'having the cat_tools extension installed anywhere but the "cat_tools" schema is not currently supported' - USING DETAIL = format('current schema for cat_tools is %s', c_schema) - ; - END IF; -END -$$; -*/ - DO $$ BEGIN CREATE ROLE object_reference__usage NOLOGIN; From d89e0fbd0561e29e67f3c9fb220836676c07fa3c Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 26 Aug 2025 16:24:37 -0500 Subject: [PATCH 11/22] minor test cleanup --- test/expected/base.out | 6 ++---- test/expected/zzz_build.out | 10 +++++----- test/sql/base.sql | 21 --------------------- 3 files changed, 7 insertions(+), 30 deletions(-) diff --git a/test/expected/base.out b/test/expected/base.out index 8579c90..9a52932 100644 --- a/test/expected/base.out +++ b/test/expected/base.out @@ -1,12 +1,10 @@ \set ECHO none -1..9 +1..7 ok 1 - Role object_reference__dependency should be granted USAGE on schema _object_reference ok 2 - Role object_reference__dependency should be granted REFERENCES on table _object_reference.object ok 3 - CREATE TEMP TABLE test_object AS SELECT object_reference.object__getsert('table', 'test_table') AS object_id; ok 4 - Verify object_oid field is correct ok 5 - Existing object works, provides correct ID ok 6 - secondary may not be specified for table objects -ok 7 - 
Verify count_nulls extension can not be relocated -ok 8 - Still works after moving the count_nulls extension -ok 9 - CREATE EXTENSION test_factory +ok 7 - CREATE EXTENSION test_factory # TRANSACTION INTENTIONALLY LEFT OPEN! diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index f3cfa9a..93431c5 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -2,16 +2,16 @@ This extension must be loaded via CREATE EXTENSION object_reference; You really, REALLY do NOT want to try and load this via psql!!! -psql:test/temp_load.not_sql:188: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! +psql:test/temp_load.not_sql:159: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! -psql:test/temp_load.not_sql:189: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! +psql:test/temp_load.not_sql:160: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! -psql:test/temp_load.not_sql:443: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! +psql:test/temp_load.not_sql:414: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! @@ -21,9 +21,9 @@ psql:test/temp_load.not_sql:443: WARNING: I promise you will be sorry if you tr -psql:test/temp_load.not_sql:550: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! +psql:test/temp_load.not_sql:521: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! -psql:test/temp_load.not_sql:557: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! +psql:test/temp_load.not_sql:528: WARNING: I promise you will be sorry if you try to use this as anything other than an extension! 
diff --git a/test/sql/base.sql b/test/sql/base.sql index 5b5ff27..07c57a5 100644 --- a/test/sql/base.sql +++ b/test/sql/base.sql @@ -9,7 +9,6 @@ SELECT plan( +1 -- schema +3 -- initial +2 -- errors - +2 -- move +1 -- create extensions ); @@ -50,26 +49,6 @@ SELECT throws_ok( , 'secondary may not be specified for table objects' ); -/* - * I'm not sure if our extension would continue working if count_nulls was - * relocated. Currently a moot point since relocation isn't supported, but I'd - * already coded the second test so might as well leave it here in case it - * changes in the future. - */ -\set null_schema test_relocate_count_nulls -CREATE SCHEMA :null_schema; -SELECT throws_ok( - $$ALTER EXTENSION count_nulls SET SCHEMA $$ || :'null_schema' - , '0A000' - , NULL - , 'Verify count_nulls extension can not be relocated' -); -SELECT is( - object_reference.object__getsert('table', 'test_table') - , (SELECT object_id FROM test_object) - , 'Still works after moving the count_nulls extension' -); - -- Create extensions SELECT lives_ok( $$CREATE EXTENSION test_factory$$ From fae8f7c7c95f0ac8df661cf40b30295a78c84b6b Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 26 Aug 2025 16:46:29 -0500 Subject: [PATCH 12/22] Add functions to retrieve object info --- README.md | 8 +++++++ sql/object_reference.sql | 44 +++++++++++++++++++++++++++++++++++++ test/expected/base.out | 10 +++++---- test/expected/zzz_build.out | 2 ++ test/sql/base.sql | 15 +++++++++++++ 5 files changed, 75 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index c20af2c..01644fd 100644 --- a/README.md +++ b/README.md @@ -96,6 +96,14 @@ object__getsert_w_group_id( Same as `object__getsert()` but accepts a numeric `object_group_id` instead of group name. +### `object__describe(object_id int) RETURNS text` + +Returns a human-readable description of the object, matching the format of PostgreSQL's `pg_describe_object()` function. 
+ +### `object__identity(object_id int) RETURNS record` + +Returns object identification information matching the format of PostgreSQL's `pg_identify_object()` function. Returns a record with columns: `type`, `schema`, `name`, `identity`. + ## Object Group Functions diff --git a/sql/object_reference.sql b/sql/object_reference.sql index 9b204e1..03b449c 100644 --- a/sql/object_reference.sql +++ b/sql/object_reference.sql @@ -731,7 +731,51 @@ $body$ , 'Add a foreign key from . to the object table.' , 'object_reference__dependency' ); +/* + * OBJECT INFO FUNCTIONS + */ +SELECT __object_reference.create_function( + 'object_reference.object__describe' + , $args$ + object_id int +$args$ + , 'text LANGUAGE sql' + , $body$ +SELECT pg_catalog.pg_describe_object( + o.classid, + o.objid, + o.objsubid +) +FROM _object_reference._object_oid o +WHERE o.object_id = $1 +$body$ + , 'Return a human-readable description of the object, matching pg_describe_object() format.' + , 'object_reference__usage' +); +SELECT __object_reference.create_function( + 'object_reference.object__identity' + , $args$ + object_id int + , OUT type text + , OUT schema text + , OUT name text + , OUT identity text +$args$ + , 'record LANGUAGE sql' + , $body$ +SELECT + i.type::text, + i.schema::text, + i.name::text, + i.identity::text +FROM _object_reference._object_oid o, + LATERAL pg_catalog.pg_identify_object(o.classid, o.objid, o.objsubid) i +WHERE o.object_id = $1 +$body$ + , 'Return object identification information matching pg_identify_object() format.' 
+ , 'object_reference__usage' +); /* * OBJECT GETSERT */ diff --git a/test/expected/base.out b/test/expected/base.out index 9a52932..9e9af7a 100644 --- a/test/expected/base.out +++ b/test/expected/base.out @@ -1,10 +1,12 @@ \set ECHO none -1..7 +1..9 ok 1 - Role object_reference__dependency should be granted USAGE on schema _object_reference ok 2 - Role object_reference__dependency should be granted REFERENCES on table _object_reference.object ok 3 - CREATE TEMP TABLE test_object AS SELECT object_reference.object__getsert('table', 'test_table') AS object_id; ok 4 - Verify object_oid field is correct -ok 5 - Existing object works, provides correct ID -ok 6 - secondary may not be specified for table objects -ok 7 - CREATE EXTENSION test_factory +ok 5 - object__describe returns same result as pg_describe_object +ok 6 - object__identity returns same result as pg_identify_object +ok 7 - Existing object works, provides correct ID +ok 8 - secondary may not be specified for table objects +ok 9 - CREATE EXTENSION test_factory # TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index 93431c5..84aa402 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -47,6 +47,8 @@ psql:test/temp_load.not_sql:528: WARNING: I promise you will be sorry if you tr + + diff --git a/test/sql/base.sql b/test/sql/base.sql index 07c57a5..9652a4b 100644 --- a/test/sql/base.sql +++ b/test/sql/base.sql @@ -8,6 +8,7 @@ SELECT plan( 0 +1 -- schema +3 -- initial + +2 -- new functions +2 -- errors +1 -- create extensions ); @@ -35,6 +36,20 @@ SELECT is( , 'test_table'::regclass::oid , 'Verify object_oid field is correct' ); + +-- Test object__describe function +SELECT is( + object_reference.object__describe((SELECT object_id FROM test_object)) + , pg_catalog.pg_describe_object('pg_class'::regclass, 'test_table'::regclass, 0) + , 'object__describe returns same result as pg_describe_object' +); + +-- Test object__identity function +SELECT results_eq( + $$SELECT * FROM object_reference.object__identity((SELECT object_id FROM test_object))$$ + , $$SELECT type, schema, name, identity FROM pg_catalog.pg_identify_object('pg_class'::regclass, 'test_table'::regclass, 0)$$ + , 'object__identity returns same result as pg_identify_object' +); SELECT is( object_reference.object__getsert('table', 'test_table') , (SELECT object_id FROM test_object) From 96be1332b36787c85d88e9e67362069b12b6eced Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 26 Aug 2025 17:38:26 -0500 Subject: [PATCH 13/22] Disallow tracking objects in temp schemas --- sql/object_reference.sql | 10 ++++++++++ test/expected/base.out | 5 +++-- test/sql/base.sql | 11 ++++++++++- test/sql/object_group.sql | 22 +++++++++++----------- 4 files changed, 34 insertions(+), 14 deletions(-) diff --git a/sql/object_reference.sql b/sql/object_reference.sql index 03b449c..878009d 100644 --- a/sql/object_reference.sql +++ b/sql/object_reference.sql @@ -795,6 +795,7 @@ DECLARE r_object_v _object_reference._object_v; r_address 
record; + r_identity record; did_insert boolean := false; @@ -823,6 +824,15 @@ BEGIN ; END IF; + -- Refuse to track objects in temporary schemas + SELECT INTO r_identity * FROM pg_catalog.pg_identify_object(c_classid, objid, objsubid); + IF r_identity.schema IS NOT NULL AND (r_identity.schema LIKE 'pg_temp%' OR r_identity.schema LIKE 'pg_toast_temp%') THEN + RAISE 'cannot track temporary object' + USING DETAIL = format('object %s is in temporary schema %s', r_identity.identity, r_identity.schema) + , ERRCODE = 'feature_not_supported' + ; + END IF; + -- Ensure the object record exists SELECT INTO r_object_v * diff --git a/test/expected/base.out b/test/expected/base.out index 9e9af7a..5357054 100644 --- a/test/expected/base.out +++ b/test/expected/base.out @@ -1,5 +1,5 @@ \set ECHO none -1..9 +1..10 ok 1 - Role object_reference__dependency should be granted USAGE on schema _object_reference ok 2 - Role object_reference__dependency should be granted REFERENCES on table _object_reference.object ok 3 - CREATE TEMP TABLE test_object AS SELECT object_reference.object__getsert('table', 'test_table') AS object_id; @@ -8,5 +8,6 @@ ok 5 - object__describe returns same result as pg_describe_object ok 6 - object__identity returns same result as pg_identify_object ok 7 - Existing object works, provides correct ID ok 8 - secondary may not be specified for table objects -ok 9 - CREATE EXTENSION test_factory +ok 9 - temp objects are rejected +ok 10 - CREATE EXTENSION test_factory # TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/sql/base.sql b/test/sql/base.sql index 9652a4b..5fa9918 100644 --- a/test/sql/base.sql +++ b/test/sql/base.sql @@ -9,7 +9,7 @@ SELECT plan( +1 -- schema +3 -- initial +2 -- new functions - +2 -- errors + +3 -- errors (includes temp object test) +1 -- create extensions ); @@ -64,6 +64,15 @@ SELECT throws_ok( , 'secondary may not be specified for table objects' ); +-- Test temp object rejection +CREATE TEMP TABLE temp_test_table(); +SELECT throws_ok( + $$SELECT object_reference.object__getsert('table', 'temp_test_table')$$ + , '0A000' -- feature_not_supported + , 'cannot track temporary object' + , 'temp objects are rejected' +); + -- Create extensions SELECT lives_ok( $$CREATE EXTENSION test_factory$$ diff --git a/test/sql/object_group.sql b/test/sql/object_group.sql index 6304892..7ae5468 100644 --- a/test/sql/object_group.sql +++ b/test/sql/object_group.sql @@ -2,8 +2,8 @@ \i test/load.sql -CREATE TEMP TABLE test_table_1(col1 int, col2 int); -CREATE TEMP TABLE test_table_2(col1 int, col2 int); +CREATE TABLE object_group_test_table_1(col1 int, col2 int); +CREATE TABLE object_group_test_table_2(col1 int, col2 int); CREATE FUNCTION pg_temp.bogus_group( command_template text @@ -43,7 +43,7 @@ SELECT plan( ); SELECT lives_ok( - $$CREATE TEMP TABLE test_table_1_id AS SELECT * FROM object_reference.object__getsert('table', 'test_table_1')$$ + $$CREATE TEMP TABLE test_table_1_id AS SELECT * FROM object_reference.object__getsert('table', 'object_group_test_table_1')$$ , 'Register test table 1' ); @@ -102,37 +102,37 @@ SELECT lives_ok( -- object__getsert SELECT throws_ok( -- Can't use helper here - $$CREATE TEMP TABLE col1_id AS SELECT * FROM object_reference.object__getsert('table column', 'test_table_1', 'col1', 'absurd group name used only for testing purposes ktxbye')$$ + $$CREATE TEMP TABLE col1_id AS SELECT * FROM object_reference.object__getsert('table column', 'object_group_test_table_1', 'col1', 'absurd group name used only for testing purposes 
ktxbye')$$ , 'P0002' , 'object group "absurd group name used only for testing purposes ktxbye" does not exist' , 'object__getsert with bogus group name' ); /* TODO SELECT throws_ok( -- Can't use helper here - $$CREATE TEMP TABLE col1_id AS SELECT * FROM object_reference.object__getsert_w_group_id('table column', 'test_table_1', 'col1', -1)$$ + $$CREATE TEMP TABLE col1_id AS SELECT * FROM object_reference.object__getsert_w_group_id('table column', 'object_group_test_table_1', 'col1', -1)$$ , '' , '' , 'object__getsert with bogus group id' ); */ SELECT lives_ok( - $$CREATE TEMP TABLE col1_id AS SELECT * FROM object_reference.object__getsert('table column', 'test_table_1', 'col1', 'object reference test group')$$ + $$CREATE TEMP TABLE col1_id AS SELECT * FROM object_reference.object__getsert('table column', 'object_group_test_table_1', 'col1', 'object reference test group')$$ , 'Register test column' ); SELECT lives_ok( - $$CREATE TEMP TABLE test_table_2_id AS SELECT * FROM object_reference.object__getsert('table', 'test_table_2', object_group_name := 'object reference test group')$$ + $$CREATE TEMP TABLE test_table_2_id AS SELECT * FROM object_reference.object__getsert('table', 'object_group_test_table_2', object_group_name := 'object reference test group')$$ , 'Register test table 2' ); -- Drop tests SELECT throws_ok( - $$ALTER TABLE test_table_1 DROP COLUMN col1$$ + $$ALTER TABLE object_group_test_table_1 DROP COLUMN col1$$ , '23503' , NULL -- current error is crap anyway , 'Dropping col1 fails' ); SELECT throws_ok( - $$DROP TABLE test_table_2$$ + $$DROP TABLE object_group_test_table_2$$ , '23503' , NULL -- current error is crap anyway , 'Dropping test_table_2 fails' @@ -144,7 +144,7 @@ SELECT throws_ok( , 'Removing test group fails' ); SELECT lives_ok( - $$ALTER TABLE test_table_1 DROP COLUMN col2$$ + $$ALTER TABLE object_group_test_table_1 DROP COLUMN col2$$ , 'Dropping col2 works' ); @@ -178,7 +178,7 @@ SELECT lives_ok( , '__object__remove() for test_table_1 
works' ); SELECT throws_ok( - $$DROP TABLE test_table_1$$ -- Should not work because column is still registered + $$DROP TABLE object_group_test_table_1$$ -- Should not work because column is still registered , '23503' , NULL -- current error is crap anyway , 'Dropping test_table_1 fails' From bb40896086b86c8bb09c41dfdcc2eca8eb7c19d5 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Wed, 27 Aug 2025 15:37:04 -0500 Subject: [PATCH 14/22] Add object__cleanup() --- README.md | 14 +++++++++++++- sql/object_reference.sql | 34 ++++++++++++++++++++++++++++++++++ test/sql/object_group.sql | 22 ++++++++++++++++++++++ 3 files changed, 69 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 01644fd..9bb1a6f 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ GRANT object_reference__usage TO role1, role2, role3; There are two roles associated with the extension: - `object_reference__usage` - Allows using the extension's public API functions. Grant this to users who need to track and manage object references. -- `object_reference__dependency` - Special role for creating foreign key dependencies to the internal object table. Only grant this to schemas/applications that need to create referential integrity constraints against the object tracking system. +- `object_reference__dependency` - Special role for creating foreign key dependencies to the internal object table. Only grant this to schemas/applications that need to create referential integrity constraints against the object tracking system. See [Referring to Objects](#referring-to-objects) below. Most users will only need `object_reference__usage`. The `object_reference__dependency` role is only needed when using `object__dependency__add()` or `object_group__dependency__add()` functions. @@ -53,6 +53,10 @@ Objects can be organized into named groups for logical organization. 
This is par The framework can automatically capture newly created objects during DDL operations and add them to a specified object group. This is implemented using PostgreSQL event triggers. +## Referring to Objects + +The framework supports removing objects that are no longer referenced. Because of this, *it is critical that any tables that store an `object_id` are registered with `object__dependency__add()`*. + # API Note that all API routines live in the `object_reference` schema. Objects in the `_object_reference` schema are considered internal-only and should not be accessed directly. @@ -247,6 +251,14 @@ post_restore() RETURNS void Ensures all object references are correct after a database restore. Run this after restoring from backup to fix any OID mismatches. +### `object__cleanup(object_id int) RETURNS void` + +```sql +object__cleanup(object_id int) RETURNS void +``` + +Attempts to delete an object from the tracking system. Silently returns if the object is still referenced by other tables (via foreign keys). This function is automatically called when objects are removed from object groups. + ### Object Type Information Functions **Get lists of unsupported/untested object types:** diff --git a/sql/object_reference.sql b/sql/object_reference.sql index 878009d..fcfc045 100644 --- a/sql/object_reference.sql +++ b/sql/object_reference.sql @@ -528,6 +528,23 @@ CREATE TABLE _object_reference.object_group__object( ); SELECT __object_reference.safe_dump('_object_reference.object_group__object'); +-- Trigger function for automatic object cleanup +SELECT __object_reference.create_function( + '_object_reference._object_group__object__cleanup_trigger' + , '' + , 'trigger LANGUAGE plpgsql' + , $body$ +BEGIN + PERFORM object_reference.object__cleanup(OLD.object_id); + RETURN OLD; +END +$body$ + , 'Trigger function to automatically attempt cleanup of objects when removed from groups.' 
+); +CREATE TRIGGER object_group__object__cleanup + AFTER DELETE ON _object_reference.object_group__object + FOR EACH ROW + EXECUTE FUNCTION _object_reference._object_group__object__cleanup_trigger(); -- __get SELECT __object_reference.create_function( 'object_reference.object_group__get' @@ -776,6 +793,23 @@ $body$ , 'Return object identification information matching pg_identify_object() format.' , 'object_reference__usage' ); +SELECT __object_reference.create_function( + 'object_reference.object__cleanup' + , $args$ + object_id int +$args$ + , 'void LANGUAGE plpgsql' + , $body$ +BEGIN + DELETE FROM _object_reference.object WHERE object.object_id = object__cleanup.object_id; +EXCEPTION WHEN foreign_key_violation THEN + -- Object is still referenced elsewhere, ignore the error + NULL; +END +$body$ + , 'Attempts to delete an object from the tracking system. Silently returns if the object is still referenced by other tables.' + , 'object_reference__usage' +); /* * OBJECT GETSERT */ diff --git a/test/sql/object_group.sql b/test/sql/object_group.sql index 7ae5468..bef342a 100644 --- a/test/sql/object_group.sql +++ b/test/sql/object_group.sql @@ -40,6 +40,8 @@ SELECT plan( +4 -- __object__remove +4 + 2 -- __remove + +4 -- cleanup tests + +1 -- final group removal (there was always an extra test) ); SELECT lives_ok( @@ -211,6 +213,26 @@ SELECT lives_ok( ) , '__object__remove() for test_table_2 works' ); + +-- Test automatic cleanup via trigger +SELECT lives_ok( + $$CREATE TEMP TABLE cleanup_test_id AS SELECT * FROM object_reference.object__getsert('table', 'object_group_test_table_1', object_group_name := 'object reference test group')$$ + , 'Add test table back to group for cleanup test' +); +SELECT ok( + EXISTS(SELECT 1 FROM _object_reference.object WHERE object_id = (SELECT object__getsert FROM cleanup_test_id)) + , 'Object exists before cleanup test' +); +SELECT lives_ok( + $$DELETE FROM _object_reference.object_group__object WHERE object_id = (SELECT object__getsert 
FROM cleanup_test_id)$$ + , 'Remove from group triggers automatic cleanup attempt' +); +-- Object should be deleted because it's no longer in any group and trigger calls cleanup +SELECT ok( + NOT EXISTS(SELECT 1 FROM _object_reference.object WHERE object_id = (SELECT object__getsert FROM cleanup_test_id)) + , 'Object was automatically cleaned up after group removal' +); + SELECT lives_ok( $$SELECT object_reference.object_group__remove('object reference test group')$$ , 'Removing empty group works' From f50bbc677aeba936e6740f20ae5e1e733ce3930f Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Wed, 27 Aug 2025 15:40:00 -0500 Subject: [PATCH 15/22] Fix tests --- test/expected/object_group.out | 9 ++++++--- test/expected/zzz_build.out | 2 ++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/test/expected/object_group.out b/test/expected/object_group.out index d286833..e670106 100644 --- a/test/expected/object_group.out +++ b/test/expected/object_group.out @@ -1,5 +1,5 @@ \set ECHO none -1..24 +1..29 ok 1 - Register test table 1 ok 2 - object_group__create(...) for group name that is too long throws error ok 3 - object_group__create('object reference test group') @@ -24,6 +24,9 @@ ok 21 - object_group__object__add(...)for missing group throws error ok 22 - Removing group with items in it fails ok 23 - __object__remove() for col1 works ok 24 - __object__remove() for test_table_2 works -ok 25 - Removing empty group works -# Looks like you planned 24 tests but ran 25 +ok 25 - Add test table back to group for cleanup test +ok 26 - Object exists before cleanup test +ok 27 - Remove from group triggers automatic cleanup attempt +ok 28 - Object was automatically cleaned up after group removal +ok 29 - Removing empty group works # TRANSACTION INTENTIONALLY LEFT OPEN! 
diff --git a/test/expected/zzz_build.out b/test/expected/zzz_build.out index 84aa402..ff07ade 100644 --- a/test/expected/zzz_build.out +++ b/test/expected/zzz_build.out @@ -49,6 +49,8 @@ psql:test/temp_load.not_sql:528: WARNING: I promise you will be sorry if you tr + + From f02670ec97e597697a5aa802b69f5fe46eebeaea Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Wed, 27 Aug 2025 16:44:52 -0500 Subject: [PATCH 16/22] Remove references to count_nulls Also, add missing cat_tools requirement to META.in --- META.in.json | 1 + Makefile | 6 +----- object_reference.control | 2 +- test/deps.sql | 1 - test/load.sql | 1 - test/sql/zzz_build.sql | 1 - 6 files changed, 3 insertions(+), 9 deletions(-) diff --git a/META.in.json b/META.in.json index d01dc70..9a541be 100644 --- a/META.in.json +++ b/META.in.json @@ -81,6 +81,7 @@ }, "runtime": { "requires": { + "cat_tools": 0, "plpgsql": 0 } }, diff --git a/Makefile b/Makefile index f5d2ff7..75543b6 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ include pgxntool/base.mk testdeps: $(wildcard test/*.sql test/helpers/*.sql) # Be careful not to include directories in this testdeps: test_factory -install: cat_tools count_nulls +install: cat_tools test: dump_test extra_clean += $(wildcard test/dump/*.log) @@ -15,10 +15,6 @@ cat_tools: $(DESTDIR)$(datadir)/extension/cat_tools.control $(DESTDIR)$(datadir)/extension/cat_tools.control: pgxn install --unstable cat_tools -.PHONY: count_nulls -count_nulls: $(DESTDIR)$(datadir)/extension/count_nulls.control -$(DESTDIR)$(datadir)/extension/count_nulls.control: - pgxn install --unstable count_nulls .PHONY: test_factory test_factory: $(DESTDIR)$(datadir)/extension/test_factory.control diff --git a/object_reference.control b/object_reference.control index 8200390..c03010e 100644 --- a/object_reference.control +++ b/object_reference.control @@ -2,4 +2,4 @@ comment = 'Provides reference IDs for database objects' default_version = '0.1.0' relocatable = false schema = 'object_reference' 
-requires = 'cat_tools, count_nulls' +requires = 'cat_tools' diff --git a/test/deps.sql b/test/deps.sql index e1a53c8..d13a016 100644 --- a/test/deps.sql +++ b/test/deps.sql @@ -4,6 +4,5 @@ /* * Normally these should be loaded by the cascade! -CREATE EXTENSION IF NOT EXISTS count_nulls; CREATE EXTENSION IF NOT EXISTS cat_tools; */ diff --git a/test/load.sql b/test/load.sql index 0f1c6be..f1b267f 100644 --- a/test/load.sql +++ b/test/load.sql @@ -1,6 +1,5 @@ \i test/pgxntool/setup.sql --- Need to add count_nulls back into the path SET search_path = tap, public; -- Don't use IF NOT EXISTS here; we want to ensure we always have the latest code diff --git a/test/sql/zzz_build.sql b/test/sql/zzz_build.sql index 4da65b4..4fc0628 100644 --- a/test/sql/zzz_build.sql +++ b/test/sql/zzz_build.sql @@ -6,7 +6,6 @@ -- Loads deps, but not extension itself \i test/pgxntool/setup.sql -CREATE EXTENSION IF NOT EXISTS count_nulls; CREATE EXTENSION IF NOT EXISTS cat_tools; CREATE SCHEMA object_reference; From 1d964c715030c791baf426a1f234e8792c4f3e34 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Wed, 27 Aug 2025 16:44:52 -0500 Subject: [PATCH 17/22] Remove references to count_nulls Also, add missing cat_tools requirement to META.in --- META.in.json | 1 + META.json | 1 + Makefile | 6 +----- object_reference.control | 2 +- sql/object_reference.sql | 2 +- test/deps.sql | 1 - test/load.sql | 1 - test/sql/zzz_build.sql | 1 - 8 files changed, 5 insertions(+), 10 deletions(-) diff --git a/META.in.json b/META.in.json index d01dc70..9a541be 100644 --- a/META.in.json +++ b/META.in.json @@ -81,6 +81,7 @@ }, "runtime": { "requires": { + "cat_tools": 0, "plpgsql": 0 } }, diff --git a/META.json b/META.json index 0512fbb..c56fae1 100644 --- a/META.json +++ b/META.json @@ -79,6 +79,7 @@ }, "runtime": { "requires": { + "cat_tools": 0, "plpgsql": 0 } }, diff --git a/Makefile b/Makefile index f5d2ff7..75543b6 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ include pgxntool/base.mk testdeps: 
$(wildcard test/*.sql test/helpers/*.sql) # Be careful not to include directories in this testdeps: test_factory -install: cat_tools count_nulls +install: cat_tools test: dump_test extra_clean += $(wildcard test/dump/*.log) @@ -15,10 +15,6 @@ cat_tools: $(DESTDIR)$(datadir)/extension/cat_tools.control $(DESTDIR)$(datadir)/extension/cat_tools.control: pgxn install --unstable cat_tools -.PHONY: count_nulls -count_nulls: $(DESTDIR)$(datadir)/extension/count_nulls.control -$(DESTDIR)$(datadir)/extension/count_nulls.control: - pgxn install --unstable count_nulls .PHONY: test_factory test_factory: $(DESTDIR)$(datadir)/extension/test_factory.control diff --git a/object_reference.control b/object_reference.control index 8200390..c03010e 100644 --- a/object_reference.control +++ b/object_reference.control @@ -2,4 +2,4 @@ comment = 'Provides reference IDs for database objects' default_version = '0.1.0' relocatable = false schema = 'object_reference' -requires = 'cat_tools, count_nulls' +requires = 'cat_tools' diff --git a/sql/object_reference.sql b/sql/object_reference.sql index fcfc045..e381023 100644 --- a/sql/object_reference.sql +++ b/sql/object_reference.sql @@ -56,7 +56,7 @@ CREATE FUNCTION __object_reference.create_function( , grants text DEFAULT NULL ) RETURNS void LANGUAGE plpgsql AS $body$ DECLARE - c_clean_args text := cat_tools.function__arg_types_text(args); + c_clean_args text := cat_tools.routine__parse_arg_types_text(args); create_template CONSTANT text := $template$ CREATE OR REPLACE FUNCTION %s( diff --git a/test/deps.sql b/test/deps.sql index e1a53c8..d13a016 100644 --- a/test/deps.sql +++ b/test/deps.sql @@ -4,6 +4,5 @@ /* * Normally these should be loaded by the cascade! 
-CREATE EXTENSION IF NOT EXISTS count_nulls; CREATE EXTENSION IF NOT EXISTS cat_tools; */ diff --git a/test/load.sql b/test/load.sql index 0f1c6be..f1b267f 100644 --- a/test/load.sql +++ b/test/load.sql @@ -1,6 +1,5 @@ \i test/pgxntool/setup.sql --- Need to add count_nulls back into the path SET search_path = tap, public; -- Don't use IF NOT EXISTS here; we want to ensure we always have the latest code diff --git a/test/sql/zzz_build.sql b/test/sql/zzz_build.sql index 4da65b4..4fc0628 100644 --- a/test/sql/zzz_build.sql +++ b/test/sql/zzz_build.sql @@ -6,7 +6,6 @@ -- Loads deps, but not extension itself \i test/pgxntool/setup.sql -CREATE EXTENSION IF NOT EXISTS count_nulls; CREATE EXTENSION IF NOT EXISTS cat_tools; CREATE SCHEMA object_reference; From 2fa541d5113cc1b2e033615ac58616b34afdb0c7 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 11 Nov 2025 16:02:54 -0600 Subject: [PATCH 18/22] Get more debug output on a test failure --- .github/workflows/ci.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3183031..bb674ff 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,3 +15,10 @@ jobs: uses: actions/checkout@v4 - name: Test on PostgreSQL ${{ matrix.pg }} run: make test PGUSER=postgres + - name: Output failed results + run: | + if [ -e test/regression.out ]; then + ls -la /etc/postgresql/${{ matrix.pg }}/ + ls -la /etc/postgresql/${{ matrix.pg }}/test/ + cat /etc/postgresql/${{ matrix.pg }}/test/log + fi From 23d3bf97e63fa72efc398a0328dc2bb6ae0c4a41 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 11 Nov 2025 16:07:01 -0600 Subject: [PATCH 19/22] Have test step continue on failure --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bb674ff..50f52de 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,10 +15,12 @@ jobs: uses: actions/checkout@v4 - name: 
Test on PostgreSQL ${{ matrix.pg }} run: make test PGUSER=postgres + continue-on-error: true - name: Output failed results run: | if [ -e test/regression.out ]; then ls -la /etc/postgresql/${{ matrix.pg }}/ ls -la /etc/postgresql/${{ matrix.pg }}/test/ cat /etc/postgresql/${{ matrix.pg }}/test/log + exit 1 fi From 40625c2a3552315a5f5e3ddbfc5a7ef47c08297b Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Tue, 11 Nov 2025 16:09:28 -0600 Subject: [PATCH 20/22] Try /var/log for logfile --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 50f52de..9fb30d8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,8 +19,8 @@ jobs: - name: Output failed results run: | if [ -e test/regression.out ]; then - ls -la /etc/postgresql/${{ matrix.pg }}/ - ls -la /etc/postgresql/${{ matrix.pg }}/test/ - cat /etc/postgresql/${{ matrix.pg }}/test/log + ls -la /var/log + ls -la /var/log/postgresql/ + cat /var/log/postgresql/postgresql-${{ matrix.pg }}-test.log exit 1 fi From 81f3dddf4d62fb13e5fef912eeeb25377ef71408 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 22 Jan 2026 15:37:25 -0600 Subject: [PATCH 21/22] Squashed 'pgxntool/' changes from e9c24de..54793a3 54793a3 Merge branch 'master' into upstream/stable ab7f6e2 Stamp 1.0.0 3a571ba Add pg_tle support and modernize test infrastructure (#11) b96ea6d Add support for Claude code; build and doc improvements (#9) bed3604 Fix pg_regress on versions > 12 (#5) (#6) git-subtree-dir: pgxntool git-subtree-split: 54793a39251290657767816d23b45d6297f3a671 --- .claude/commands/commit.md | 78 ++++ .claude/settings.json | 19 + .gitattributes | 3 + .gitignore | 1 + CLAUDE.md | 255 +++++++++++ HISTORY.asc | 13 +- LICENSE | 2 +- README.asc | 225 ++++++++- README.html | 912 ++++++++++++++++++++++++++++--------- _.gitignore | 10 +- base.mk | 137 +++++- build_meta.sh | 30 +- control.mk.sh | 90 ++++ lib.sh | 40 ++ 
make_results.sh | 28 ++ meta.mk.sh | 96 ++-- pgtle.sh | 849 ++++++++++++++++++++++++++++++++++ pgtle_versions.md | 47 ++ setup.sh | 4 + 19 files changed, 2519 insertions(+), 320 deletions(-) create mode 100644 .claude/commands/commit.md create mode 100644 .claude/settings.json create mode 100644 CLAUDE.md create mode 100755 control.mk.sh create mode 100644 lib.sh create mode 100755 make_results.sh create mode 100755 pgtle.sh create mode 100644 pgtle_versions.md diff --git a/.claude/commands/commit.md b/.claude/commands/commit.md new file mode 100644 index 0000000..5ddbd74 --- /dev/null +++ b/.claude/commands/commit.md @@ -0,0 +1,78 @@ +--- +description: Create a git commit following project standards and safety protocols +allowed-tools: Bash(git status:*), Bash(git log:*), Bash(git add:*), Bash(git diff:*), Bash(git commit:*), Bash(make test:*) +--- + +# commit + +Create a git commit following all project standards and safety protocols for pgxntool-test. + +**CRITICAL REQUIREMENTS:** + +1. **Git Safety**: Never update `git config`, never force push to `main`/`master`, never skip hooks unless explicitly requested + +2. **Commit Attribution**: Do NOT add "Generated with Claude Code" to commit message body. The standard Co-Authored-By trailer is acceptable per project CLAUDE.md. + +3. **Testing**: ALL tests must pass before committing: + - Run `make test` + - Check the output carefully for any "not ok" lines + - Count passing vs total tests + - **If ANY tests fail: STOP. Do NOT commit. Ask the user what to do.** + - There is NO such thing as an "acceptable" failing test + - Do NOT rationalize failures as "pre-existing" or "unrelated" + +**WORKFLOW:** + +1. Run in parallel: `git status`, `git diff --stat`, `git log -10 --oneline` + +2. 
Check test status - THIS IS MANDATORY: + - Run `make test 2>&1 | tee /tmp/test-output.txt` + - Check for failing tests: `grep "^not ok" /tmp/test-output.txt` + - If ANY tests fail: STOP immediately and inform the user + - Only proceed if ALL tests pass + +3. Analyze changes and draft concise commit message following this repo's style: + - Look at `git log -10 --oneline` to match existing style + - Be factual and direct (e.g., "Fix BATS dist test to create its own distribution") + - Focus on "why" when it adds value, otherwise just describe "what" + - List items in roughly decreasing order of impact + - Keep related items grouped together + - **In commit messages**: Wrap all code references in backticks - filenames, paths, commands, function names, variables, make targets, etc. + - Examples: `helpers.bash`, `make test-recursion`, `setup_sequential_test()`, `TEST_REPO`, `.envs/`, `01-meta.bats` + - Prevents markdown parsing issues and improves clarity + +4. **PRESENT the proposed commit message to the user and WAIT for approval before proceeding** + +5. After receiving approval, stage changes appropriately using `git add` + +6. **VERIFY staged files with `git status`**: + - If user did NOT specify a subset: Confirm ALL modified/untracked files are staged + - If user specified only certain files: Confirm ONLY those files are staged + - STOP and ask user if staging doesn't match intent + +7. After verification, commit using `HEREDOC` format: +```bash +git commit -m "$(cat <<'EOF' +Subject line (imperative mood, < 72 chars) + +Additional context if needed, wrapped at 72 characters. + +Co-Authored-By: Claude +EOF +)" +``` + +8. Run `git status` after commit to verify success + +9. If pre-commit hook modifies files: Check authorship (`git log -1 --format='%an %ae'`) and branch status, then amend if safe or create new commit + +**REPOSITORY CONTEXT:** + +This is pgxntool-test, a test harness for the pgxntool framework. 
Key facts: +- Tests live in `tests/` directory +- `.envs/` contains test environments (gitignored) + +**RESTRICTIONS:** +- DO NOT push unless explicitly asked +- DO NOT commit files with actual secrets (`.env`, `credentials.json`, etc.) +- Never use `-i` flags (`git commit -i`, `git rebase -i`, etc.) diff --git a/.claude/settings.json b/.claude/settings.json new file mode 100644 index 0000000..e7bf5a9 --- /dev/null +++ b/.claude/settings.json @@ -0,0 +1,19 @@ +{ + "permissions": { + "allow": [ + "Bash(cat:*)", + "Bash(make test:*)", + "Bash(tee:*)", + "Bash(echo:*)", + "Bash(git show:*)", + "Bash(git log:*)", + "Bash(ls:*)", + "Bash(find:*)", + "Bash(git checkout:*)", + "Bash(head:*)" + ], + "additionalDirectories": [ + "../pgxntool-test/" + ] + } +} diff --git a/.gitattributes b/.gitattributes index c602ea0..a94d824 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,7 @@ .gitattributes export-ignore +.claude/ export-ignore +*.md export-ignore +.DS_Store export-ignore *.asc export-ignore *.adoc export-ignore *.html export-ignore diff --git a/.gitignore b/.gitignore index a01ee28..5ffb236 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ .*.swp +.claude/*.local.json diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..d2ea214 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,255 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Git Commit Guidelines + +**IMPORTANT**: When creating commit messages, do not attribute commits to yourself (Claude). Commit messages should reflect the work being done without AI attribution in the message body. The standard Co-Authored-By trailer is acceptable. + +## Critical: What This Repo Actually Is + +**pgxntool is NOT a standalone project.** It is a meta-framework that exists ONLY to be embedded into PostgreSQL extension projects via `git subtree`. This repo cannot be built, tested, or run directly. 
+ +**Think of it like this**: pgxntool is to PostgreSQL extensions what a Makefile template library is to C projects - it's infrastructure code that gets copied into other projects, not a project itself. + +## Critical: Directory Purity - NO Temporary Files + +**This directory contains ONLY files that get embedded into extension projects.** When extension developers run `git subtree add`, they pull the entire pgxntool directory into their project. + +**ABSOLUTE RULE**: NO temporary files, scratch work, or development tools may be added to this directory. + +**Examples of what NEVER belongs here:** +- Temporary files (scratch notes, test output, debugging artifacts) +- Development scripts or tools (these go in pgxntool-test/) +- Planning documents (PLAN-*.md files go in pgxntool-test/) +- Any file you wouldn't want in every extension project that uses pgxntool + +**CLAUDE.md exception**: CLAUDE.md exists here for AI assistant guidance, but is excluded from distributions via `.gitattributes export-ignore`. Same with `.claude/` directory. + +**Why this matters**: Any file you add here will be pulled into hundreds of extension projects via git subtree. Keep this directory lean and clean. + +## Development Workflow: Work from pgxntool-test + +**CRITICAL**: All development work on pgxntool should be done from the pgxntool-test repository, NOT from this repository. + +**For complete development workflow documentation, see:** +https://github.com/Postgres-Extensions/pgxntool-test + +## Two-Repository Development Pattern + +This codebase uses a two-repository pattern: + +1. **pgxntool/** (this repo) - The framework code that gets embedded into extension projects +2. 
**pgxntool-test** - The test harness that validates pgxntool functionality + +**For development and testing workflow, see:** +https://github.com/Postgres-Extensions/pgxntool-test + +## How Extension Developers Use pgxntool + +Extension projects include pgxntool via git subtree: + +```bash +git subtree add -P pgxntool --squash git@github.com:decibel/pgxntool.git release +pgxntool/setup.sh +``` + +After setup, their Makefile typically contains just: +```makefile +include pgxntool/base.mk +``` + +## Architecture: Two-Phase Build System + +### Phase 1: Meta Generation (`build_meta.sh`) +- Processes `META.in.json` (template with placeholders/empty values) +- Strips out X_comment fields and empty values +- Produces clean `META.json` + +### Phase 2: Variable Extraction (`meta.mk.sh`) +- Parses `META.json` using `JSON.sh` (a bash-based JSON parser) +- Generates `meta.mk` with Make variables: + - `PGXN` - distribution name + - `PGXNVERSION` - version number + - `EXTENSIONS` - list of extensions provided + - `EXTENSION_*_VERSION` - per-extension versions + - `EXTENSION_VERSION_FILES` - auto-generated versioned SQL files +- `base.mk` includes `meta.mk` via `-include` + +### The Magic of base.mk + +`base.mk` provides a complete PGXS-based build system: +- Auto-detects extension SQL files in `sql/` +- Auto-detects C modules in `src/*.c` +- Auto-detects tests in `test/sql/*.sql` +- Auto-generates versioned extension files (`extension--version.sql`) +- Handles Asciidoc → HTML conversion +- Integrates with PGXN distribution format +- Manages git tagging and release packaging + +## File Structure for Consumer Projects + +Projects using pgxntool follow this layout: +``` +project/ +├── Makefile # include pgxntool/base.mk +├── META.in.json # Template metadata (customize for your extension) +├── META.json # Auto-generated from META.in.json +├── extension.control # Standard PostgreSQL control file +├── pgxntool/ # This repo, embedded via git subtree +├── sql/ +│ └── extension.sql # Base 
extension SQL +├── src/ # Optional C code (*.c files) +├── test/ +│ ├── deps.sql # Load extension and test dependencies +│ ├── sql/*.sql # Test SQL files +│ └── expected/*.out # Expected test outputs +└── doc/ # Optional docs (*.adoc, *.asciidoc) +``` + +## Commands for Extension Developers (End Users) + +These are the commands extension developers use (documented for context): + +```bash +make # Build extension (generates versioned SQL, docs) +make test # Full test: testdeps → install → installcheck → show diffs +make results # Run tests and update expected output files +make html # Generate HTML from Asciidoc sources +make tag # Create git branch for current META.json version +make dist # Create PGXN .zip (auto-tags, places in ../) +make pgtle # Generate pg_tle registration SQL (see pg_tle Support below) +make check-pgtle # Check pg_tle installation and report version +make install-pgtle # Install pg_tle registration SQL files into database +make pgxntool-sync # Update to latest pgxntool via git subtree pull +``` + +## Testing with pgxntool + +### Critical Testing Rules + +**NEVER use `make installcheck` directly**. Always use `make test` instead. The `make test` target ensures: +- Clean builds before testing +- Proper test isolation +- Correct test dependency installation +- Proper cleanup and result comparison + +**Database Connection Requirement**: PostgreSQL must be running before executing `make test`. If you get connection errors (e.g., "could not connect to server"), stop and ask the user to start PostgreSQL. + +**Claude Code MUST NEVER run `make results`**. This target updates test expected output files and requires manual human verification of test changes before execution. + +**Claude Code MUST NEVER modify files in `test/expected/`**. These are expected test outputs that define correct behavior and must only be updated through the `make results` workflow. + +The workflow is: +1. Human runs `make test` and examines diffs +2. 
Human manually verifies changes are correct +3. Human manually runs `make results` to update expected files + +### Test Output Mechanics + +pgxntool uses PostgreSQL's pg_regress test framework: +- **Actual test output**: Written to `test/results/` directory +- **Expected output**: Stored in `test/expected/` directory +- **Test comparison**: pg_regress compares actual vs expected and generates diffs; `make test` displays them +- **Updating expectations**: `make results` copies `test/results/` → `test/expected/` + +When tests fail, examine the diff output carefully. The actual test output in `test/results/` shows what your code produced, while `test/expected/` shows what was expected. + +## Key Implementation Details + +### PostgreSQL Version Handling +- `MAJORVER` = version × 10 (e.g., 9.6 → 96, 13 → 130) +- Tests use `--load-language=plpgsql` for versions < 13 +- Version detection via `pg_config --version` + +### Test System (pg_regress based) +- Tests in `test/sql/*.sql`, outputs compared to `test/expected/*.out` +- Setup via `test/pgxntool/setup.sql` (loads pgTap and deps.sql) +- `.IGNORE: installcheck` allows `make test` to handle errors (show diffs, then exit with error status) +- `make results` updates expected outputs after test runs + +### Document Generation +- Auto-detects `asciidoctor` or `asciidoc` +- Generates HTML from `*.adoc` and `*.asciidoc` in `$(DOC_DIRS)` +- HTML required for `make dist`, optional for `make install` +- Template-based rules via `ASCIIDOC_template` + +### Distribution Packaging +- `make dist` creates `../PGXN-VERSION.zip` +- Always creates git branch tag matching version +- Uses `git archive` to package +- Validates repo is clean before tagging + +### Subtree Sync Support +- `make pgxntool-sync` pulls latest release +- Multiple sync targets: release, stable, local variants +- Uses `git subtree pull --squash` +- Requires clean repo (no uncommitted changes) + +### pg_tle Support + +pgxntool can generate pg_tle (Trusted Language 
Extensions) registration SQL for deploying extensions in AWS RDS/Aurora without filesystem access. + +**Usage:** `make pgtle` or `make pgtle PGTLE_VERSION=1.5.0+` + +**Output:** `pg_tle/{version_range}/{extension}.sql` + +**For version range details and API compatibility boundaries, see:** `pgtle_versions.md` + +**Installation targets:** + +- `make check-pgtle` - Checks if pg_tle is installed and reports the version. Reports version from `pg_extension` if extension has been created, or newest available version from `pg_available_extension_versions` if available but not created. Errors if pg_tle not available in cluster. Assumes `PG*` environment variables are configured. + +- `make install-pgtle` - Auto-detects pg_tle version and installs appropriate registration SQL files. Updates or creates pg_tle extension as needed. Determines which version range files to install based on detected version. Runs all generated SQL files via `psql` to register extensions with pg_tle. Assumes `PG*` environment variables are configured. 
+ +**Version notation:** +- `X.Y.Z+` means >= X.Y.Z +- `X.Y.Z-A.B.C` means >= X.Y.Z and < A.B.C (note boundary) + +**Key implementation details:** +- Script: `pgxntool/pgtle.sh` (bash) +- Parses `.control` files for metadata (NOT META.json) +- Fixed delimiter: `$_pgtle_wrap_delimiter_$` (validated not in source) +- Each output file contains ALL versions and ALL upgrade paths +- Multi-extension support (multiple .control files) +- Output directory `pg_tle/` excluded from git +- Depends on `make all` to ensure versioned SQL files exist first +- Only processes versioned files (`sql/{ext}--{version}.sql`), not base files + +**SQL file handling:** +- **Version files** (`sql/{ext}--{version}.sql`): Generated automatically by `make all` from base `sql/{ext}.sql` file +- **Upgrade scripts** (`sql/{ext}--{v1}--{v2}.sql`): Created manually by users when adding new extension versions +- The script ensures the default_version file exists if the base file exists (creates it from base file if missing) +- All version files and upgrade scripts are discovered and included in the generated pg_tle registration SQL + +**Dependencies:** +Generated files depend on: +- Control file (metadata source) +- All SQL files (sql/{ext}--*.sql) - must run `make all` first +- Generator script itself + +**Limitations:** +- No C code support (pg_tle requires trusted languages only) +- PostgreSQL 14.5+ required (pg_tle not available on earlier versions) + +## Critical Gotchas + +1. **Empty Variables**: If `DOCS` or `MODULES` is empty, base.mk sets to empty to prevent PGXS errors +2. **testdeps Pattern**: Never add recipes to `testdeps` - create separate target and make it a prerequisite +3. **META.json is Generated**: Always edit `META.in.json`, never `META.json` directly +4. **Control File Versions**: No automatic validation that `.control` matches `META.json` version +5. **PGXNTOOL_NO_PGXS_INCLUDE**: Setting this skips PGXS inclusion (for special scenarios) +6. 
**Distribution Placement**: `.zip` files go in parent directory (`../`) to avoid repo clutter + +## Scripts + +- **setup.sh** - Initializes pgxntool in a new extension project (copies templates, creates directories) +- **build_meta.sh** - Strips empty fields from META.in.json to create META.json +- **meta.mk.sh** - Parses META.json via JSON.sh and generates meta.mk with Make variables +- **JSON.sh** - Third-party bash JSON parser (MIT licensed) +- **safesed** - Utility for safe sed operations + +## Related Repositories + +- **pgxntool-test** - Test harness for validating pgxntool functionality: https://github.com/Postgres-Extensions/pgxntool-test +- Never produce any kind of metrics or estimates unless you have data to back them up. If you do have data you MUST reference it. \ No newline at end of file diff --git a/HISTORY.asc b/HISTORY.asc index 9cb793b..bedc0b5 100644 --- a/HISTORY.asc +++ b/HISTORY.asc @@ -1,5 +1,14 @@ -STABLE ------- +1.0.0 +----- +== Fix broken multi-extension support +Prior to this fix, distributions with multiple extensions or extensions with versions different from the PGXN distribution version were completely broken. Extension versions are now correctly read from each `.control` file's `default_version` instead of using META.json's distribution version. + +== Add pg_tle support +New `make pgtle` target generates pg_tle registration SQL for extensions. Supports pg_tle version ranges (1.0.0-1.4.0, 1.4.0-1.5.0, 1.5.0+) with appropriate API calls for each range. See README for usage. + +== Use git tags for distribution versioning +The `tag` and `rmtag` targets now create/delete git tags instead of branches. + == Support 13+ The `--load-language` option was removed from `pg_regress` in 13. diff --git a/LICENSE b/LICENSE index 5a20925..4a507f7 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015, Jim Nasby, Blue Treble Solutions +Copyright (c) 2015-2026, Jim Nasby, Blue Treble Solutions All rights reserved. 
Redistribution and use in source and binary forms, with or without diff --git a/README.asc b/README.asc index c2c6683..bf3559d 100644 --- a/README.asc +++ b/README.asc @@ -23,6 +23,10 @@ pgxntool/setup.sh TODO: Create a nice script that will init a new project for you. +== Development + +If you want to contribute to pgxntool development, work from the https://github.com/decibel/pgxntool-test[pgxntool-test] repository, not from this repository. That repository contains the test infrastructure and development tools needed to validate changes to pgxntool. This repository contains only the framework files that get embedded into extension projects via `git subtree`. + == Usage Typically, you can just create a simple Makefile that does nothing but include base.mk: @@ -41,6 +45,8 @@ This will build any .html files that can be created. See <<_Document_Handling>>. === test Runs unit tests via the PGXS `installcheck` target. Unlike a simple `make installcheck` though, the `test` rule has the following prerequisites: clean testdeps install installcheck. All of those are PGXS rules, except for `testdeps`. +NOTE: While you can still run `make installcheck` or any other valid PGXS make target directly, it's recommended to use `make test` when using pgxntool. The `test` target ensures clean builds, proper test isolation, and correct dependency installation. + === testdeps This rule allows you to ensure certain actions have taken place before running tests. By default it has a single prerequisite, `pgtap`, which will attempt to install http://pgtap.org[pgtap] from PGXN. This depneds on having the pgxn client installed. @@ -60,10 +66,18 @@ If you want to over-ride the default dependency on `pgtap` you should be able to WARNING: It will probably cause problems if you try to create a `testdeps` rule that has a recipe. Instead of doing that, put the recipe in a separate rule and make that rule a prerequisite of `testdeps` as show in the example. 
=== results -Because `make test` ultimately runs `installcheck`, it's using the Postgres test suite. Unfortunately, that suite is based on running `diff` between a raw output file and expected results. I *STRONGLY* recommend you use http://pgtap.org[pgTap] instead! The extra effort of learning pgTap will quickly pay for itself. https://github.com/decibel/trunklet-format/blob/master/test/sql/base.sql[This example] might help get you started. +Because `make test` ultimately runs `installcheck`, it's using the Postgres test suite. Unfortunately, that suite is based on running `diff` between a raw output file and expected results. I *STRONGLY* recommend you use http://pgtap.org[pgTap] instead! With pgTap, it's MUCH easier to determine whether a test is passing or not - tests explicitly pass or fail rather than requiring you to examine diff output. The extra effort of learning pgTap will quickly pay for itself. https://github.com/decibel/trunklet-format/blob/master/test/sql/base.sql[This example] might help get you started. No matter what method you use, once you know that all your tests are passing correctly, you need to create or update the test output expected files. `make results` does that for you. +IMPORTANT: *`make results` requires manual verification first*. The correct workflow is: + +1. Run `make test` and examine the diff output +2. Manually verify that the differences are correct and expected +3. Only then run `make results` to update the expected output files in `test/expected/` + +Never run `make results` without first verifying the test changes are correct. The `results` target copies files from `test/results/` to `test/expected/`, so running it blindly will make incorrect output become the new expected behavior. + === tag `make tag` will create a git branch for the current version of your extension, as determined by the META.json file. The reason to do this is so you can always refer to the exact code that went into a released version. 
@@ -83,6 +97,109 @@ NOTE: Your repository must be clean (no modified files) in order to run this. Ru TIP: There is also a `pgxntool-sync-%` rule if you need to do more advanced things. +=== pgtle +Generates pg_tle (Trusted Language Extensions) registration SQL files for deploying extensions in managed environments like AWS RDS/Aurora. See <<_pg_tle_Support>> for complete documentation. + +`make pgtle` generates SQL files in `pg_tle/` subdirectories organized by pg_tle version ranges. For version range details, see `pgtle_versions.md`. + +=== check-pgtle +Checks if pg_tle is installed and reports the version. This target: +- Reports the version from `pg_extension` if `CREATE EXTENSION pg_tle` has been run in the database +- Errors if pg_tle is not available in the cluster + +This target assumes `PG*` environment variables are configured for `psql` connectivity. + +---- +make check-pgtle +---- + +=== run-pgtle +Registers all extensions with pg_tle by executing the generated pg_tle registration SQL files in a PostgreSQL database. This target: +- Requires pg_tle extension to be installed (checked via `check-pgtle`) +- Uses `pgtle.sh` to determine which version range directory to use based on the installed pg_tle version +- Runs all generated SQL files via `psql` to register your extensions with pg_tle + +This target assumes that running `psql` without any arguments will connect to the desired database. You can control this by setting the various PG* environment variables (and possibly using the `.pgpass` file). See the PostgreSQL documentation for more details. + +NOTE: The `pgtle` target is a dependency, so `make run-pgtle` will automatically generate the SQL files if needed. + +---- +make run-pgtle +---- + +After running `make run-pgtle`, you can create your extension in the database: +---- +CREATE EXTENSION "your-extension-name"; +---- + +== Version-Specific SQL Files + +PGXNtool automatically generates version-specific SQL files from your base SQL file. 
These files follow the pattern `sql/{extension}--{version}.sql` and are used by PostgreSQL's extension system to install specific versions of your extension. + +=== How Version Files Are Generated + +When you run `make` (or `make all`), PGXNtool: + +1. Reads your `META.json` file to determine the extension version from `provides.{extension}.version` +2. Generates a Makefile rule that copies your base SQL file (`sql/{extension}.sql`) to the version-specific file (`sql/{extension}--{version}.sql`) +3. Executes this rule, creating the version-specific file with a header comment indicating it's auto-generated + +For example, if your `META.json` contains: +---- +"provides": { + "myext": { + "version": "1.2.3", + ... + } +} +---- + +Running `make` will create `sql/myext--1.2.3.sql` by copying `sql/myext.sql`. + +=== What Controls the Version Number + +The version number comes from `META.json` → `provides.{extension}.version`, *not* from your `.control` file's `default_version` field. The `.control` file's `default_version` is used by PostgreSQL to determine which version to install by default, but the actual version-specific file that gets generated is determined by what's in `META.json`. + +To change the version of your extension: +1. Update `provides.{extension}.version` in `META.json` +2. Run `make` to regenerate the version-specific file +3. Update `default_version` in your `.control` file to match (if needed) + +=== Committing Version Files + +Version-specific SQL files are now treated as permanent files that should be committed to your repository. This makes it much easier to test updates to extensions, as you can see exactly what SQL was included in each version. + +IMPORTANT: These files are auto-generated and include a header comment warning not to edit them. Any manual changes will be overwritten the next time you run `make`. To modify the extension, edit the base SQL file (`sql/{extension}.sql`) instead. 
+ +=== Alternative: Ignoring Version Files + +If you prefer not to commit version-specific SQL files, you must add them to your `.gitignore` to prevent `make dist` from failing due to untracked files. Add the following to your `.gitignore`: + +---- +# Auto-generated version-specific SQL files (if not committing them) +sql/*--*.sql +!sql/*--*--*.sql +---- + +The second line (`!sql/*--*--*.sql`) ensures that upgrade scripts (which contain two version numbers and should be manually written) are still tracked. + +WARNING: If you ignore version files instead of committing them, they will NOT be included in your PGXN distribution (`make dist` uses `git archive`, which only includes tracked files). This means users installing your extension from PGXN will need `make` and PGXS available to build the extension - they cannot simply copy the SQL files into their PostgreSQL installation. For maximum compatibility, we recommend committing version files. + +=== Distribution Inclusion + +Version-specific files are included in distributions created by `make dist` only if they are committed to git. Since `make dist` uses `git archive`, only tracked files are included in the distribution archive. + +=== Multiple Versions + +If you need to support multiple versions of your extension: + +1. Create additional version-specific files manually (e.g., `sql/myext--1.0.0.sql`, `sql/myext--1.1.0.sql`) +2. Create upgrade scripts for version transitions (e.g., `sql/myext--1.0.0--1.1.0.sql`) +3. Update `META.json` to reflect the current version you're working on +4. Commit all version files and upgrade scripts to your repository + +The version file for the current version (specified in `META.json`) will be automatically regenerated when you run `make`, but other version files you create manually will be preserved. + == Document Handling PGXNtool supports generation and installation of document files. There are several variables and rules that control this behavior. 
@@ -158,7 +275,111 @@ Because of this, `base.mk` will forcibly define it to be NULL if it's empty. PGXNtool appends *all* files found in all `$(DOC_DIRS)` to `DOCS`. +== pg_tle Support +[[_pg_tle_Support]] +pgxntool can generate link:https://github.com/aws/pg_tle[pg_tle (Trusted Language Extensions)] registration SQL for deploying PostgreSQL extensions in managed environments like AWS RDS and Aurora where filesystem access is not available. + +For make targets, see: <<_pgtle>>, <<_check_pgtle>>, <<_run_pgtle>>. + +=== What is pg_tle? + +pg_tle is an AWS open-source framework that enables developers to create and deploy PostgreSQL extensions without filesystem access. Traditional PostgreSQL extensions require `.control` and `.sql` files on the filesystem, which isn't possible in managed services like RDS and Aurora. + +pg_tle solves this by: +- Storing extension metadata and SQL in database tables +- Using the `pgtle_admin` role for administrative operations +- Enabling `CREATE EXTENSION` to work in managed environments + +=== Quick Start + +Generate pg_tle registration SQL for your extension: + +---- +make pgtle +---- + +This creates files in `pg_tle/` subdirectories organized by pg_tle version ranges. See `pgtle_versions.md` for complete version range details and API compatibility boundaries. + +=== Version Groupings + +pgxntool creates different sets of files for different pg_tle versions to handle backward-incompatible API changes. Each version boundary represents a change to pg_tle's API functions that we use. + +For details on version boundaries and API changes, see `pgtle_versions.md`. + +=== Installation Example + +IMPORTANT: This is only a basic example. Always refer to the link:https://github.com/aws/pg_tle[main pg_tle documentation] for complete installation instructions and best practices. + +Basic installation steps: + +. Ensure pg_tle is installed and grant the `pgtle_admin` role to your user +. 
Generate and run the pg_tle registration SQL files: ++ +---- +make run-pgtle +---- ++ +This automatically detects your pg_tle version and runs the appropriate SQL files. See `pgtle_versions.md` for version range details. +. Create your extension: `CREATE EXTENSION myextension;` + +=== Advanced Usage + +==== Multi-Extension Projects + +If your project has multiple extensions (multiple `.control` files), `make pgtle` generates files for all of them: + +---- +myproject/ +├── ext1.control +├── ext2.control +└── pg_tle/ + ├── 1.0.0-1.5.0/ + │ ├── ext1.sql + │ └── ext2.sql + └── 1.5.0+/ + ├── ext1.sql + └── ext2.sql +---- + +=== How It Works +`make pgtle` does the following: + +. Parses control file(s): Extracts `comment`, `default_version`, `requires`, and `schema` fields +. Discovers SQL files: Finds all versioned files (`sql/{ext}--{version}.sql`) and upgrade scripts (`sql/{ext}--{ver1}--{ver2}.sql`) +. Wraps SQL content: Uses a fixed dollar-quote delimiter (`$_pgtle_wrap_delimiter_$`) to wrap SQL for pg_tle functions +. Generates registration SQL: Creates `pgtle.install_extension()` calls for each version, `pgtle.install_update_path()` for upgrades, and `pgtle.set_default_version()` for the default +. Version-specific output: Generates separate files for different pg_tle capability levels + +Each generated SQL file is wrapped in a transaction (`BEGIN;` ... `COMMIT;`) to ensure atomic installation. + +=== Troubleshooting + +==== "No versioned SQL files found" + +*Problem*: The script can't find `sql/{ext}--{version}.sql` files. + +*Solution*: Run `make` first to generate versioned files from your base `sql/{ext}.sql` file. + +==== "Control file not found" + +*Problem*: The script can't find `{ext}.control` in the current directory. + +*Solution*: Run `make pgtle` from your extension's root directory (where the `.control` file is). 
+ +==== "SQL file contains reserved pg_tle delimiter" + +*Problem*: Your SQL files contain the string `$_pgtle_wrap_delimiter_$` (extremely unlikely). + +*Solution*: Don't use that dollar-quote delimiter in your code. + +==== Extension uses C code + +*Problem*: Your control file has `module_pathname`, indicating C code. + +*Solution*: pg_tle only supports trusted languages. You cannot use C extensions with pg_tle. The script will warn you but still generate files (which won't work). + +NOTE: there are several untrusted languages (such as plpython), and the only tests for C. == Copyright -Copyright (c) 2015 Jim Nasby +Copyright (c) 2026 Jim Nasby PGXNtool is released under a https://github.com/decibel/pgxntool/blob/master/LICENCE[BSD license]. Note that it includes https://github.com/dominictarr/JSON.sh[JSON.sh], which is released under a https://github.com/decibel/pgxntool/blob/master/JSON.sh.LICENCE[MIT license]. diff --git a/README.html b/README.html index ae4a597..41200aa 100644 --- a/README.html +++ b/README.html @@ -2,31 +2,26 @@ - + - + PGXNtool @@ -428,26 +445,50 @@

PGXNtool

Table of Contents
@@ -466,7 +507,7 @@

PGXNtool

-

1. Install

+

1. Install

This assumes that you’ve already initialized your extension in git.

@@ -495,7 +536,15 @@

1. Install

-

2. Usage

+

2. Development

+
+
+

If you want to contribute to pgxntool development, work from the pgxntool-test repository, not from this repository. That repository contains the test infrastructure and development tools needed to validate changes to pgxntool. This repository contains only the framework files that get embedded into extension projects via git subtree.

+
+
+
+
+

3. Usage

Typically, you can just create a simple Makefile that does nothing but include base.mk:

@@ -508,7 +557,7 @@

2. Usage

-

3. make targets

+

4. make targets

These are the make targets that are provided by base.mk

@@ -526,19 +575,31 @@

3. make targe

-

3.1. html

+

4.1. html

This will build any .html files that can be created. See [_Document_Handling].

-

3.2. test

+

4.2. test

Runs unit tests via the PGXS installcheck target. Unlike a simple make installcheck though, the test rule has the following prerequisites: clean testdeps install installcheck. All of those are PGXS rules, except for testdeps.

+
+ + + + + +
+
Note
+
+While you can still run make installcheck or any other valid PGXS make target directly, it’s recommended to use make test when using pgxntool. The test target ensures clean builds, proper test isolation, and correct dependency installation. +
+
-

3.3. testdeps

+

4.3. testdeps

This rule allows you to ensure certain actions have taken place before running tests. By default it has a single prerequisite, pgtap, which will attempt to install pgtap from PGXN. This depneds on having the pgxn client installed.

@@ -574,16 +635,44 @@

3.3. testdeps

-

3.4. results

+

4.4. results

-

Because make test ultimately runs installcheck, it’s using the Postgres test suite. Unfortunately, that suite is based on running diff between a raw output file and expected results. I STRONGLY recommend you use pgTap instead! The extra effort of learning pgTap will quickly pay for itself. This example might help get you started.

+

Because make test ultimately runs installcheck, it’s using the Postgres test suite. Unfortunately, that suite is based on running diff between a raw output file and expected results. I STRONGLY recommend you use pgTap instead! With pgTap, it’s MUCH easier to determine whether a test is passing or not - tests explicitly pass or fail rather than requiring you to examine diff output. The extra effort of learning pgTap will quickly pay for itself. This example might help get you started.

No matter what method you use, once you know that all your tests are passing correctly, you need to create or update the test output expected files. make results does that for you.

+
+ + + + + +
+
Important
+
+make results requires manual verification first. The correct workflow is: +
+
+
+
    +
  1. +

    Run make test and examine the diff output

    +
  2. +
  3. +

    Manually verify that the differences are correct and expected

    +
  4. +
  5. +

    Only then run make results to update the expected output files in test/expected/

    +
  6. +
+
+
+

Never run make results without first verifying the test changes are correct. The results target copies files from test/results/ to test/expected/, so running it blindly will make incorrect output become the new expected behavior.

+
-

3.5. tag

+

4.5. tag

make tag will create a git branch for the current version of your extension, as determined by the META.json file. The reason to do this is so you can always refer to the exact code that went into a released version.

@@ -604,7 +693,7 @@

3.5. tag

-

3.6. dist

+

4.6. dist

make dist will create a .zip file for your current version that you can upload to PGXN. The file is named after the PGXN name and version (the top-level "name" and "version" attributes in META.json). The .zip file is placed in the parent directory so as not to clutter up your git repo.

@@ -622,7 +711,7 @@

3.6. dist

-

3.7. pgxntool-sync

+

4.7. pgxntool-sync

This rule will pull down the latest released version of PGXNtool via git subtree pull.

@@ -651,10 +740,204 @@

3.7. pgxnto

+
+

4.8. pgtle

+
+

Generates pg_tle (Trusted Language Extensions) registration SQL files for deploying extensions in managed environments like AWS RDS/Aurora. See [_pg_tle_Support] for complete documentation.

+
+
+

make pgtle generates SQL files in pg_tle/ subdirectories organized by pg_tle version ranges. For version range details, see pgtle_versions.md.

+
+
+
+

4.9. check-pgtle

+
+

Checks if pg_tle is installed and reports the version. This target: +- Reports the version from pg_extension if CREATE EXTENSION pg_tle has been run in the database +- Errors if pg_tle is not available in the cluster

+
+
+

This target assumes PG* environment variables are configured for psql connectivity.

+
+
+
+
make check-pgtle
+
+
+
+
+

4.10. run-pgtle

+
+

Registers all extensions with pg_tle by executing the generated pg_tle registration SQL files in a PostgreSQL database. This target: +- Requires pg_tle extension to be installed (checked via check-pgtle) +- Uses pgtle.sh to determine which version range directory to use based on the installed pg_tle version +- Runs all generated SQL files via psql to register your extensions with pg_tle

+
+
+

This target assumes that running psql without any arguments will connect to the desired database. You can control this by setting the various PG* environment variables (and possibly using the .pgpassword file). See the PostgreSQL documentation for more details.

+
+
+ + + + + +
+
Note
+
+The pgtle target is a dependency, so make run-pgtle will automatically generate the SQL files if needed. +
+
+
+
+
make run-pgtle
+
+
+
+

After running make run-pgtle, you can create your extension in the database:

+
+
+
+
CREATE EXTENSION "your-extension-name";
+
+
+
-

4. Document Handling

+

5. Version-Specific SQL Files

+
+
+

PGXNtool automatically generates version-specific SQL files from your base SQL file. These files follow the pattern sql/{extension}--{version}.sql and are used by PostgreSQL’s extension system to install specific versions of your extension.

+
+
+

5.1. How Version Files Are Generated

+
+

When you run make (or make all), PGXNtool:

+
+
+
    +
  1. +

    Reads your META.json file to determine the extension version from provides.{extension}.version

    +
  2. +
  3. +

    Generates a Makefile rule that copies your base SQL file (sql/{extension}.sql) to the version-specific file (sql/{extension}--{version}.sql)

    +
  4. +
  5. +

    Executes this rule, creating the version-specific file with a header comment indicating it’s auto-generated

    +
  6. +
+
+
+

For example, if your META.json contains:

+
+
+
+
"provides": {
+  "myext": {
+    "version": "1.2.3",
+    ...
+  }
+}
+
+
+
+

Running make will create sql/myext—​1.2.3.sql by copying sql/myext.sql.

+
+
+
+

5.2. What Controls the Version Number

+
+

The version number comes from META.jsonprovides.{extension}.version, not from your .control file’s default_version field. The .control file’s default_version is used by PostgreSQL to determine which version to install by default, but the actual version-specific file that gets generated is determined by what’s in META.json.

+
+
+

To change the version of your extension: +1. Update provides.{extension}.version in META.json +2. Run make to regenerate the version-specific file +3. Update default_version in your .control file to match (if needed)

+
+
+
+

5.3. Committing Version Files

+
+

Version-specific SQL files are now treated as permanent files that should be committed to your repository. This makes it much easier to test updates to extensions, as you can see exactly what SQL was included in each version.

+
+
+ + + + + +
+
Important
+
+These files are auto-generated and include a header comment warning not to edit them. Any manual changes will be overwritten the next time you run make. To modify the extension, edit the base SQL file (sql/{extension}.sql) instead. +
+
+
+
+

5.4. Alternative: Ignoring Version Files

+
+

If you prefer not to commit version-specific SQL files, you must add them to your .gitignore to prevent make dist from failing due to untracked files. Add the following to your .gitignore:

+
+
+
+
# Auto-generated version-specific SQL files (if not committing them)
+sql/*--*.sql
+!sql/*--*--*.sql
+
+
+
+

The second line (!sql/----*.sql) ensures that upgrade scripts (which contain two version numbers and should be manually written) are still tracked.

+
+
+ + + + + +
+
Warning
+
+If you ignore version files instead of committing them, they will NOT be included in your PGXN distribution (make dist uses git archive, which only includes tracked files). This means users installing your extension from PGXN will need make and PGXS available to build the extension - they cannot simply copy the SQL files into their PostgreSQL installation. For maximum compatibility, we recommend committing version files. +
+
+
+
+

5.5. Distribution Inclusion

+
+

Version-specific files are included in distributions created by make dist only if they are committed to git. Since make dist uses git archive, only tracked files are included in the distribution archive.

+
+
+
+

5.6. Multiple Versions

+
+

If you need to support multiple versions of your extension:

+
+
+
    +
  1. +

    Create additional version-specific files manually (e.g., sql/myext—​1.0.0.sql, sql/myext—​1.1.0.sql)

    +
  2. +
  3. +

    Create upgrade scripts for version transitions (e.g., sql/myext—​1.0.0—​1.1.0.sql)

    +
  4. +
  5. +

    Update META.json to reflect the current version you’re working on

    +
  6. +
  7. +

    Commit all version files and upgrade scripts to your repository

    +
  8. +
+
+
+

The version file for the current version (specified in META.json) will be automatically regenerated when you run make, but other version files you create manually will be preserved.

+
+
+
+
+
+

6. Document Handling

PGXNtool supports generation and installation of document files. There are several variables and rules that control this behavior.

@@ -665,7 +948,7 @@

4. If any generated files are missing (or out-of-date) during installation, PGXNtool will build them if Asciidoc is present on the system.

-

4.1. Document Variables

+

6.1. Document Variables

DOC_DIRS
@@ -712,7 +995,7 @@

4

-

4.2. Document Rules

+

6.2. Document Rules

If Asciidoc is found (or $(ASCIIDOC) is set), the html rule will be added as a prerequisite to the install and installchec rules. That will ensure that docs are generated for install and test, but only if Asciidoc is available. @@ -730,7 +1013,7 @@

4.2. Docu
ASCIIDOC_template
define ASCIIDOC_template
-%.html: %.$(1) (1)
+%.html: %.$(1) # (1)
 ifndef ASCIIDOC
 	$$(warning Could not find "asciidoc" or "asciidoctor". Add one of them to your PATH,)
 	$$(warning or set ASCIIDOC to the correct location.)
@@ -754,7 +1037,7 @@ 

4.2. Docu

-

4.3. The DOCS variable

+

6.3. The DOCS variable

This variable has special meaning to PGXS. See the Postgres documentation for full details.

@@ -782,10 +1065,189 @@

4.3

- +

7. pg_tle Support

+
+

pgxntool can generate pg_tle (Trusted Language Extensions) registration SQL for deploying PostgreSQL extensions in managed environments like AWS RDS and Aurora where filesystem access is not available.

+
-

Copyright (c) 2015 Jim Nasby <Jim.Nasby@BlueTreble.com>

+

For make targets, see: pgtle, check-pgtle, run-pgtle.

+
+
+

7.1. What is pg_tle?

+
+

pg_tle is an AWS open-source framework that enables developers to create and deploy PostgreSQL extensions without filesystem access. Traditional PostgreSQL extensions require .control and .sql files on the filesystem, which isn’t possible in managed services like RDS and Aurora.

+
+
+

pg_tle solves this by: +- Storing extension metadata and SQL in database tables +- Using the pgtle_admin role for administrative operations +- Enabling CREATE EXTENSION to work in managed environments

+
+
+
+

7.2. Quick Start

+
+

Generate pg_tle registration SQL for your extension:

+
+
+
+
make pgtle
+
+
+
+

This creates files in pg_tle/ subdirectories organized by pg_tle version ranges. See pgtle_versions.md for complete version range details and API compatibility boundaries.

+
+
+
+

7.3. Version Groupings

+
+

pgxntool creates different sets of files for different pg_tle versions to handle backward-incompatible API changes. Each version boundary represents a change to pg_tle’s API functions that we use.

+
+
+

For details on version boundaries and API changes, see pgtle_versions.md.

+
+
+
+

7.4. Installation Example

+
+ + + + + +
+
Important
+
+This is only a basic example. Always refer to the main pg_tle documentation for complete installation instructions and best practices. +
+
+
+

Basic installation steps:

+
+
+
    +
  1. +

    Ensure pg_tle is installed and grant the pgtle_admin role to your user

    +
  2. +
  3. +

    Generate and run the pg_tle registration SQL files:

    +
    +
    +
    make run-pgtle
    +
    +
    +
    +

    This automatically detects your pg_tle version and runs the appropriate SQL files. See pgtle_versions.md for version range details.

    +
    +
  4. +
  5. +

    Create your extension: CREATE EXTENSION myextension;

    +
  6. +
+
+
+
+

7.5. Advanced Usage

+
+

7.5.1. Multi-Extension Projects

+
+

If your project has multiple extensions (multiple .control files), make pgtle generates files for all of them:

+
+
+
+
myproject/
+├── ext1.control
+├── ext2.control
+└── pg_tle/
+    ├── 1.0.0-1.5.0/
+    │   ├── ext1.sql
+    │   └── ext2.sql
+    └── 1.5.0+/
+        ├── ext1.sql
+        └── ext2.sql
+
+
+
+
+
+

7.6. How It Works

+
+

make pgtle does the following:

+
+
+
    +
  1. +

    Parses control file(s): Extracts comment, default_version, requires, and schema fields

    +
  2. +
  3. +

    Discovers SQL files: Finds all versioned files (sql/{ext}--{version}.sql) and upgrade scripts (sql/{ext}--{ver1}--{ver2}.sql)

    +
  4. +
  5. +

    Wraps SQL content: Uses a fixed dollar-quote delimiter ($pgtle_wrap_delimiter$) to wrap SQL for pg_tle functions

    +
  6. +
  7. +

    Generates registration SQL: Creates pgtle.install_extension() calls for each version, pgtle.install_update_path() for upgrades, and pgtle.set_default_version() for the default

    +
  8. +
  9. +

    Version-specific output: Generates separate files for different pg_tle capability levels

    +
  10. +
+
+
+

Each generated SQL file is wrapped in a transaction (BEGIN; …​ COMMIT;) to ensure atomic installation.

+
+
+
+

7.7. Troubleshooting

+
+

7.7.1. "No versioned SQL files found"

+
+

Problem: The script can’t find sql/{ext}--{version}.sql files.

+
+
+

Solution: Run make first to generate versioned files from your base sql/{ext}.sql file.

+
+
+
+

7.7.2. "Control file not found"

+
+

Problem: The script can’t find {ext}.control in the current directory.

+
+
+

Solution: Run make pgtle from your extension’s root directory (where the .control file is).

+
+
+
+

7.7.3. "SQL file contains reserved pg_tle delimiter"

+
+

Problem: Your SQL files contain the string $pgtle_wrap_delimiter$ (extremely unlikely).

+
+
+

Solution: Don’t use that dollar-quote delimiter in your code.

+
+
+
+

7.7.4. Extension uses C code

+
+

Problem: Your control file has module_pathname, indicating C code.

+
+
+

Solution: pg_tle only supports trusted languages. You cannot use C extensions with pg_tle. The script will warn you but still generate files (which won’t work).

+
+
+ + + + + +
+
Note
+
+there are several untrusted languages (such as plpython), but the script only tests for C. +== Copyright +Copyright (c) 2026 Jim Nasby <Jim.Nasby@gmail.com> +

PGXNtool is released under a BSD license. Note that it includes JSON.sh, which is released under a MIT license.

@@ -793,9 +1255,11 @@
+
+
diff --git a/_.gitignore b/_.gitignore index 3eb345a..0c14928 100644 --- a/_.gitignore +++ b/_.gitignore @@ -1,11 +1,15 @@ # Editor files .*.swp +# Claude Code local settings +.claude/*.local.json + # Explicitly exclude META.json! !/META.json # Generated make files meta.mk +control.mk # Compiler output *.o @@ -13,8 +17,7 @@ meta.mk .deps/ # built targets -/sql/*--* -!/sql/*--*--*.sql +# Note: Version-specific files (sql/*--*.sql) are now tracked in git and should be committed # Test artifacts results/ @@ -24,3 +27,6 @@ regression.out # Misc tmp/ .DS_Store + +# pg_tle generated files +/pg_tle/ diff --git a/base.mk b/base.mk index a976ebb..b03a5cc 100644 --- a/base.mk +++ b/base.mk @@ -1,5 +1,8 @@ PGXNTOOL_DIR := pgxntool +# Ensure 'all' is the default target (not META.json which happens to be first) +.DEFAULT_GOAL := all + # # META.json # @@ -10,13 +13,30 @@ META.json: META.in.json $(PGXNTOOL_DIR)/build_meta.sh # # meta.mk # -# Buind meta.mk, which contains info from META.json, and include it +# Build meta.mk, which contains PGXN distribution info from META.json PGXNTOOL_distclean += meta.mk meta.mk: META.json Makefile $(PGXNTOOL_DIR)/base.mk $(PGXNTOOL_DIR)/meta.mk.sh @$(PGXNTOOL_DIR)/meta.mk.sh $< >$@ -include meta.mk +# +# control.mk +# +# Build control.mk, which contains extension info from .control files +# This is separate from meta.mk because: +# - META.json specifies PGXN distribution metadata +# - .control files specify what PostgreSQL actually uses (e.g., default_version) +# These can differ, and PostgreSQL cares about the control file version. 
+# +# Find all control files first (needed for dependencies) +PGXNTOOL_CONTROL_FILES := $(wildcard *.control) +PGXNTOOL_distclean += control.mk +control.mk: $(PGXNTOOL_CONTROL_FILES) Makefile $(PGXNTOOL_DIR)/base.mk $(PGXNTOOL_DIR)/control.mk.sh + @$(PGXNTOOL_DIR)/control.mk.sh $(PGXNTOOL_CONTROL_FILES) >$@ + +-include control.mk + DATA = $(EXTENSION_VERSION_FILES) $(wildcard sql/*--*--*.sql) DOC_DIRS += doc # NOTE: if this is empty it gets forcibly defined to NUL before including PGXS @@ -30,11 +50,18 @@ ASCIIDOC_FILES += $(foreach dir,$(DOC_DIRS),$(foreach ext,$(ASCIIDOC_EXTS),$(wil PG_CONFIG ?= pg_config TESTDIR ?= test TESTOUT ?= $(TESTDIR) -TEST_SOURCE_FILES += $(wildcard $(TESTDIR)/input/*.source) -TEST_OUT_FILES = $(subst input,output,$(TEST_SOURCE_FILES)) +# .source files are OPTIONAL - see "pg_regress workflow" comment below for details +TEST__SOURCE__INPUT_FILES += $(wildcard $(TESTDIR)/input/*.source) +TEST__SOURCE__OUTPUT_FILES += $(wildcard $(TESTDIR)/output/*.source) +TEST__SOURCE__INPUT_AS_OUTPUT = $(subst input,output,$(TEST__SOURCE__INPUT_FILES)) TEST_SQL_FILES += $(wildcard $(TESTDIR)/sql/*.sql) TEST_RESULT_FILES = $(patsubst $(TESTDIR)/sql/%.sql,$(TESTDIR)/expected/%.out,$(TEST_SQL_FILES)) -TEST_FILES = $(TEST_SOURCE_FILES) $(TEST_SQL_FILES) +TEST_FILES = $(TEST__SOURCE__INPUT_FILES) $(TEST_SQL_FILES) +# Ephemeral files generated from source files (should be cleaned) +# input/*.source → sql/*.sql (converted by pg_regress) +TEST__SOURCE__SQL_FILES = $(patsubst $(TESTDIR)/input/%.source,$(TESTDIR)/sql/%.sql,$(TEST__SOURCE__INPUT_FILES)) +# output/*.source → expected/*.out (converted by pg_regress) +TEST__SOURCE__EXPECTED_FILES = $(patsubst $(TESTDIR)/output/%.source,$(TESTDIR)/expected/%.out,$(TEST__SOURCE__OUTPUT_FILES)) REGRESS = $(sort $(notdir $(subst .source,,$(TEST_FILES:.sql=)))) # Sort is to get unique list REGRESS_OPTS = --inputdir=$(TESTDIR) --outputdir=$(TESTOUT) # See additional setup below MODULES = $(patsubst %.c,%,$(wildcard 
src/*.c)) @@ -42,7 +69,7 @@ ifeq ($(strip $(MODULES)),) MODULES =# Set to NUL so PGXS doesn't puke endif -EXTRA_CLEAN = $(wildcard ../$(PGXN)-*.zip) $(EXTENSION_VERSION_FILES) +EXTRA_CLEAN = $(wildcard ../$(PGXN)-*.zip) $(TEST__SOURCE__SQL_FILES) $(TEST__SOURCE__EXPECTED_FILES) pg_tle/ # Get Postgres version, as well as major (9.4, etc) version. # NOTE! In at least some versions, PGXS defines VERSION, so we intentionally don't use that variable @@ -70,7 +97,7 @@ DATA += $(wildcard *.control) # Don't have installcheck bomb on error .IGNORE: installcheck -installcheck: $(TEST_RESULT_FILES) $(TEST_OUT_FILES) $(TEST_SQL_FILES) $(TEST_SOURCE_FILES) +installcheck: $(TEST_RESULT_FILES) $(TEST_SQL_FILES) $(TEST__SOURCE__INPUT_FILES) | $(TESTDIR)/sql/ $(TESTDIR)/expected/ $(TESTOUT)/results/ # # TEST SUPPORT @@ -89,25 +116,81 @@ test: testdeps install installcheck # make results: runs `make test` and copy all result files to expected # DO NOT RUN THIS UNLESS YOU'RE CERTAIN ALL YOUR TESTS ARE PASSING! +# +# pg_regress workflow: +# 1. Converts input/*.source → sql/*.sql (with token substitution) +# 2. Converts output/*.source → expected/*.out (with token substitution) +# 3. Runs tests, saving actual output in results/ +# 4. Compares results/ with expected/ +# +# NOTE: Both input/*.source and output/*.source are COMPLETELY OPTIONAL and are +# very rarely needed. pg_regress does NOT create the input/ or output/ directories +# - these are optional INPUT directories that users create if they need them. +# Most extensions will never need these directories. +# +# CRITICAL: Do NOT copy files that have corresponding output/*.source files, because +# those are the source of truth and will be regenerated by pg_regress from the .source files. +# Only copy files from results/ that don't have output/*.source counterparts. 
.PHONY: results results: test - rsync -rlpgovP $(TESTOUT)/results/ $(TESTDIR)/expected + @# Copy .out files from results/ to expected/, excluding those with output/*.source counterparts + @# .out files with output/*.source counterparts are generated from .source files and should NOT be overwritten + @$(PGXNTOOL_DIR)/make_results.sh $(TESTDIR) $(TESTOUT) # testdeps is a generic dependency target that you can add targets to .PHONY: testdeps testdeps: pgtap +# +# pg_tle support - Generate pg_tle registration SQL +# + +# PGXNTOOL_CONTROL_FILES is defined above (for control.mk dependencies) +PGXNTOOL_EXTENSIONS = $(basename $(PGXNTOOL_CONTROL_FILES)) + +# Main target +# Depend on 'all' to ensure versioned SQL files are generated first +# Depend on control.mk (which defines EXTENSION_VERSION_FILES) +# Depend on control files explicitly so changes trigger rebuilds +# Generates all supported pg_tle versions for each extension +.PHONY: pgtle +pgtle: all control.mk $(PGXNTOOL_CONTROL_FILES) + @$(foreach ext,$(PGXNTOOL_EXTENSIONS),\ + $(PGXNTOOL_DIR)/pgtle.sh --extension $(ext);) + +# +# pg_tle installation support +# + +# Check if pg_tle is installed and report version +# Only reports version if CREATE EXTENSION pg_tle has been run +# Errors if pg_tle extension is not installed +# Uses pgtle.sh to get version (avoids code duplication) +.PHONY: check-pgtle +check-pgtle: + @echo "Checking pg_tle installation..." 
+ @PGTLE_VERSION=$$($(PGXNTOOL_DIR)/pgtle.sh --get-version 2>/dev/null); \ + if [ -n "$$PGTLE_VERSION" ]; then \ + echo "pg_tle extension version: $$PGTLE_VERSION"; \ + exit 0; \ + fi; \ + echo "ERROR: pg_tle extension is not installed" >&2; \ + echo " Run 'CREATE EXTENSION pg_tle;' first" >&2; \ + exit 1 + +# Run pg_tle registration SQL files +# Requires pg_tle extension to be installed (checked via check-pgtle) +# Uses pgtle.sh to determine which version range directory to use +# Assumes PG* environment variables are configured +.PHONY: run-pgtle +run-pgtle: pgtle + @$(PGXNTOOL_DIR)/pgtle.sh --run + # These targets ensure all the relevant directories exist -$(TESTDIR)/sql: - @mkdir -p $@ -$(TESTDIR)/expected/: +$(TESTDIR)/sql $(TESTDIR)/expected/ $(TESTOUT)/results/: @mkdir -p $@ $(TEST_RESULT_FILES): | $(TESTDIR)/expected/ @touch $@ -$(TESTDIR)/output/: - @mkdir -p $@ -$(TEST_OUT_FILES): | $(TESTDIR)/output/ $(TESTDIR)/expected/ $(TESTDIR)/sql/ - @touch $@ # @@ -155,14 +238,23 @@ docclean: # rmtag: git fetch origin # Update our remotes - @test -z "$$(git branch --list $(PGXNVERSION))" || git branch -d $(PGXNVERSION) - @test -z "$$(git branch --list -r origin/$(PGXNVERSION))" || git push --delete origin $(PGXNVERSION) + @test -z "$$(git tag --list $(PGXNVERSION))" || git tag -d $(PGXNVERSION) + @test -z "$$(git ls-remote --tags origin $(PGXNVERSION) | grep -v '{}')" || git push --delete origin $(PGXNVERSION) -# TODO: Don't puke if tag already exists *and is the same* tag: @test -z "$$(git status --porcelain)" || (echo 'Untracked changes!'; echo; git status; exit 1) - git branch $(PGXNVERSION) - git push --set-upstream origin $(PGXNVERSION) + @# Skip if tag already exists and points to HEAD + @if git rev-parse $(PGXNVERSION) >/dev/null 2>&1; then \ + if [ "$$(git rev-parse $(PGXNVERSION))" = "$$(git rev-parse HEAD)" ]; then \ + echo "Tag $(PGXNVERSION) already exists at HEAD, skipping"; \ + else \ + echo "ERROR: Tag $(PGXNVERSION) exists but points to different 
commit" >&2; \ + exit 1; \ + fi; \ + else \ + git tag $(PGXNVERSION); \ + fi + git push origin $(PGXNVERSION) .PHONY: forcetag forcetag: rmtag tag @@ -171,6 +263,13 @@ forcetag: rmtag tag dist: tag dist-only dist-only: + @# Check if .gitattributes exists but isn't committed + @if [ -f .gitattributes ] && ! git ls-files --error-unmatch .gitattributes >/dev/null 2>&1; then \ + echo "ERROR: .gitattributes exists but is not committed to git." >&2; \ + echo " git archive only respects export-ignore for committed files." >&2; \ + echo " Please commit .gitattributes for export-ignore to take effect." >&2; \ + exit 1; \ + fi git archive --prefix=$(PGXN)-$(PGXNVERSION)/ -o ../$(PGXN)-$(PGXNVERSION).zip $(PGXNVERSION) .PHONY: forcedist diff --git a/build_meta.sh b/build_meta.sh index 70d2273..e0fd6b2 100755 --- a/build_meta.sh +++ b/build_meta.sh @@ -1,16 +1,28 @@ #!/bin/bash +# Build META.json from META.in.json template +# +# WHY META.in.json EXISTS: +# META.in.json serves as a template that: +# 1. Shows all possible PGXN metadata fields (both required and optional) with comments +# 2. Can have empty placeholder fields like "key": "" or "key": [ "", "" ] +# 3. Users edit this to fill in their extension's metadata +# +# WHY WE GENERATE META.json: +# The reason we generate META.json from a template is to eliminate empty fields that +# are optional; PGXN.org gets upset about them. In the future it's possible we'll do +# more here (for example, if we added more info to the template we could use it to +# generate control files). +# +# WHY WE COMMIT META.json: +# PGXN.org requires META.json to be present in submitted distributions. We choose +# to commit it to git instead of manually adding it to distributions for simplicity +# (and since it generally only changes once for each new version). 
+ set -e -error () { - echo $@ >&2 -} -die () { - return=$1 - shift - error $@ - exit $return -} +BASEDIR=$(dirname "$0") +source "$BASEDIR/lib.sh" [ $# -eq 2 ] || die 2 Invalid number of arguments $# diff --git a/control.mk.sh b/control.mk.sh new file mode 100755 index 0000000..cc63cea --- /dev/null +++ b/control.mk.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# +# control.mk.sh - Generate Makefile rules from PostgreSQL extension control files +# +# This script parses .control files to extract extension metadata (particularly +# default_version) and generates Make variables and rules for building versioned +# SQL files. +# +# Usage: control.mk.sh [ ...] +# +# Output (to stdout, meant to be redirected to control.mk): +# EXTENSIONS += +# EXTENSION_SQL_FILES += sql/.sql +# EXTENSION__VERSION := +# EXTENSION__VERSION_FILE = sql/--.sql +# EXTENSION_VERSION_FILES += $(EXTENSION__VERSION_FILE) +# +# +# Why control files instead of META.json? +# META.json's "provides" section specifies versions for PGXN distribution metadata. +# But PostgreSQL uses the control file's default_version to determine which +# versioned SQL file to load. These can differ (e.g., PGXN distribution version +# might be updated independently of extension version). Using the control file +# ensures the generated SQL files match what PostgreSQL expects. + +set -o errexit -o errtrace -o pipefail + +BASEDIR=$(dirname "$0") +source "$BASEDIR/lib.sh" + +# Extract default_version from a PostgreSQL extension control file +# Usage: get_control_default_version +# Errors if: +# - Control file doesn't exist +# - default_version is not specified (pgxntool requires it) +# - Multiple default_version lines exist +get_control_default_version() { + local control_file="$1" + + if [ ! 
-f "$control_file" ]; then + die 2 "Control file '$control_file' not found" + fi + + # Count default_version lines + local count + count=$(grep -cE "^[[:space:]]*default_version[[:space:]]*=" "$control_file" 2>/dev/null) || count=0 + + if [ "$count" -eq 0 ]; then + die 2 "default_version not specified in '$control_file'. PostgreSQL allows extensions without a default_version, but pgxntool requires it to generate versioned SQL files." + fi + + if [ "$count" -gt 1 ]; then + die 2 "Multiple default_version lines found in '$control_file'. Control files must have exactly one default_version." + fi + + # Extract the version value + # Handles: default_version = '1.0', default_version = "1.0", trailing comments + local version=$(grep -E "^[[:space:]]*default_version[[:space:]]*=" "$control_file" | \ + sed -e "s/^[^=]*=[[:space:]]*//" \ + -e "s/[[:space:]]*#.*//" \ + -e "s/^['\"]//;s/['\"]$//" ) + + if [ -z "$version" ]; then + die 2 "Could not parse default_version value from '$control_file'" + fi + + echo "$version" +} + +# Main: process each control file passed as argument +if [ $# -eq 0 ]; then + die 1 "Usage: control.mk.sh [ ...]" +fi + +for control_file in "$@"; do + ext=$(basename "$control_file" .control) + version=$(get_control_default_version "$control_file") + + echo "EXTENSIONS += $ext" + echo "EXTENSION_SQL_FILES += sql/${ext}.sql" + echo "EXTENSION_${ext}_VERSION := ${version}" + echo "EXTENSION_${ext}_VERSION_FILE = sql/${ext}--\$(EXTENSION_${ext}_VERSION).sql" + echo "EXTENSION_VERSION_FILES += \$(EXTENSION_${ext}_VERSION_FILE)" + echo "\$(EXTENSION_${ext}_VERSION_FILE): sql/${ext}.sql ${control_file}" + echo " @echo '/* DO NOT EDIT - AUTO-GENERATED FILE */' > \$(EXTENSION_${ext}_VERSION_FILE)" + echo " @cat sql/${ext}.sql >> \$(EXTENSION_${ext}_VERSION_FILE)" + echo +done + +# vi: expandtab ts=2 sw=2 diff --git a/lib.sh b/lib.sh new file mode 100644 index 0000000..c3eb88e --- /dev/null +++ b/lib.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# lib.sh - Common utility 
functions for pgxntool scripts +# +# This file is meant to be sourced by other scripts, not executed directly. +# Usage: source "$(dirname "${BASH_SOURCE[0]}")/lib.sh" + +# Error function - outputs to stderr but doesn't exit +# Usage: error "message" +error() { + echo "ERROR: $*" >&2 +} + +# Die function - outputs error message and exits with specified code +# Usage: die EXIT_CODE "message" +die() { + local exit_code=$1 + shift + error "$@" + exit $exit_code +} + +# Debug function +# Usage: debug LEVEL "message" +# Outputs message to stderr if DEBUG >= LEVEL +# Debug levels use multiples of 10 (10, 20, 30, 40, etc.) to allow for easy expansion +# - 10: Critical errors, important warnings +# - 20: Warnings, significant state changes +# - 30: General debugging, function entry/exit, array operations +# - 40: Verbose details, loop iterations +# - 50+: Maximum verbosity +# Enable with: DEBUG=30 scriptname.sh +debug() { + local level=$1 + shift + local message="$*" + + if [ "${DEBUG:-0}" -ge "$level" ]; then + echo "DEBUG[$level]: $message" >&2 + fi +} diff --git a/make_results.sh b/make_results.sh new file mode 100755 index 0000000..066e372 --- /dev/null +++ b/make_results.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Helper script for make results target +# Copies .out files from results/ to expected/, excluding those with output/*.source counterparts + +set -e + +TESTDIR="${1:-test}" +TESTOUT="${2:-${TESTDIR}}" + +mkdir -p "${TESTDIR}/expected" + +# Use nullglob so globs that don't match return nothing instead of the literal pattern +shopt -s nullglob + +for result_file in "${TESTOUT}/results"/*.out; do + test_name=$(basename "$result_file" .out) + + # Check if this file has a corresponding output/*.source file + # Only consider non-empty source files (empty files are likely leftovers from pg_regress) + if [ -f "${TESTDIR}/output/${test_name}.source" ] && [ -s "${TESTDIR}/output/${test_name}.source" ]; then + echo "WARNING: ${TESTOUT}/results/${test_name}.out exists but 
will NOT be copied" >&2 + echo " (excluded because ${TESTDIR}/output/${test_name}.source exists)" >&2 + else + # Copy the file - it doesn't have an output/*.source counterpart + cp "$result_file" "${TESTDIR}/expected/${test_name}.out" + fi +done + diff --git a/meta.mk.sh b/meta.mk.sh index a5da2ec..e6cecc5 100755 --- a/meta.mk.sh +++ b/meta.mk.sh @@ -1,25 +1,38 @@ -#! /usr/bin/env bash +#!/usr/bin/env bash +# +# meta.mk.sh - Generate Makefile variables from META.json +# +# This script parses META.json (PGXN distribution metadata) and generates +# Make variables for the distribution name and version. +# +# Usage: meta.mk.sh +# +# Output (to stdout, meant to be redirected to meta.mk): +# PGXN := +# PGXNVERSION := +# +# Note: Extension-specific variables (like EXTENSION_*_VERSION) are generated +# by control.mk.sh from .control files, not from META.json. This is because +# META.json specifies PGXN distribution metadata, while .control files specify +# what PostgreSQL actually uses. set -o errexit -o errtrace -o pipefail -trap 'echo "Error on line ${LINENO}" >&2' ERR -META=$1 -BASEDIR=`dirname $0` +BASEDIR=$(dirname "$0") +source "$BASEDIR/lib.sh" + JSON_SH=$BASEDIR/JSON.sh -error () { - echo $@ >&2 -} trap 'error "Error on line ${LINENO}"' ERR -die () { - local retval=$1 - shift - error $@ - exit $retval -} +META=$1 +if [ -z "$META" ]; then + die 1 "Usage: meta.mk.sh " +fi -REQUIRED='abstract maintainer license provides name version' +if [ ! 
-f "$META" ]; then + die 2 "META.json file '$META' not found" +fi #function to get value of specified key #returns empty string if not found @@ -27,7 +40,7 @@ REQUIRED='abstract maintainer license provides name version' #usage: VAR=$(getkey foo.bar) #get value of "bar" contained within "foo" # VAR=$(getkey foo[4].bar) #get value of "bar" contained in the array "foo" on position 4 # VAR=$(getkey [4].foo) #get value of "foo" contained in the root unnamed array on position 4 -function _getkey { +_getkey() { #reformat key string (parameter) to what JSON.sh uses KEYSTRING=$(sed -e 's/\[/\"\,/g' -e 's/^\"\,/\[/g' -e 's/\]\./\,\"/g' -e 's/\./\"\,\"/g' -e '/^\[/! s/^/\[\"/g' -e '/\]$/! s/$/\"\]/g' <<< "$@") #extract the key value @@ -37,60 +50,21 @@ function _getkey { FOUT="${FOUT%\"*}" echo "$FOUT" } -function getkeys { - KEYSTRING=$(sed -e 's/\[/\"\,/g' -e 's/^\"\,/\[/g' -e 's/\]\./\,\"/g' -e 's/\./\"\,\"/g' -e '/^\[/! s/^/\[\"/g' -e '/\",\"$/! s/$/\",\"/g' <<< "$@") - #extract the key value - FOUT=$(grep -F "$KEYSTRING" <<< "$JSON_PARSED") - FOUT="${FOUT%$'\t'*}" - echo "$FOUT" -} - -#function returning length of array -#returns zero if key in parameter does not exist or is not an array -#usage: VAR=$(getarrlen foo.bar) #get length of array "bar" contained within "foo" -# VAR=$(getarrlen) #get length of the root unnamed array -# VAR=$(getarrlen [2].foo.bar) #get length of array "bar" contained within "foo", which is stored in the root unnamed array on position 2 -function getarrlen { - #reformat key string (parameter) to what JSON.sh uses - KEYSTRING=$(gsed -e '/^\[/! s/\[/\"\,/g' -e 's/\]\./\,\"/g' -e 's/\./\"\,\"/g' -e '/^$/! {/^\[/! s/^/\[\"/g}' -e '/^$/! 
s/$/\"\,/g' -e 's/\[/\\\[/g' -e 's/\]/\\\]/g' -e 's/\,/\\\,/g' -e '/^$/ s/^/\\\[/g' <<< "$@") - #extract the key array length - get last index - LEN=$(grep -o "${KEYSTRING}[0-9]*" <<< "$JSON_PARSED" | tail -n -1 | grep -o "[0-9]*$") - #increment to get length, if empty => zero - if [ -n "$LEN" ]; then - LEN=$(($LEN+1)) - else - LEN="0" - fi - echo "$LEN" -} -JSON_PARSED=$(cat $META | $JSON_SH -l) - -function getkey { +getkey() { out=$(_getkey "$@") [ -n "$out" ] || die 2 "key $@ not found in $META" echo $out } -# Handle meta-spec specially -spec_version=`getkey meta-spec.version` -[ "$spec_version" == "1.0.0" ] || die 2 "Unknown meta-spec/version: $PGXN_meta-spec_version" +JSON_PARSED=$(cat "$META" | $JSON_SH -l) + +# Validate meta-spec version +spec_version=$(getkey meta-spec.version) +[ "$spec_version" == "1.0.0" ] || die 2 "Unknown meta-spec/version: $spec_version" +# Output distribution name and version echo "PGXN := $(getkey name)" echo "PGXNVERSION := $(getkey version)" -echo - -provides=$(getkeys provides | sed -e 's/\["provides","//' -e 's/",".*//' | uniq) -for ext in $provides; do - version=$(getkey provides.${ext}.version) - [ -n "$version" ] || die 2 "provides/${ext} does not specify a version number" - echo "EXTENSIONS += $ext" - echo "EXTENSION_SQL_FILES += sql/${ext}.sql" - echo "EXTENSION_${ext}_VERSION := ${version}" - echo "EXTENSION_${ext}_VERSION_FILE = sql/${ext}--\$(EXTENSION_${ext}_VERSION).sql" - echo "EXTENSION_VERSION_FILES += \$(EXTENSION_${ext}_VERSION_FILE)" - echo "\$(EXTENSION_${ext}_VERSION_FILE): sql/${ext}.sql META.json meta.mk" - echo ' cp $< $@' -done # vi: expandtab ts=2 sw=2 diff --git a/pgtle.sh b/pgtle.sh new file mode 100755 index 0000000..8fd2d17 --- /dev/null +++ b/pgtle.sh @@ -0,0 +1,849 @@ +#!/bin/bash +# +# pgtle.sh - Generate pg_tle registration SQL for PostgreSQL extensions +# +# Part of pgxntool: https://github.com/decibel/pgxntool +# +# SYNOPSIS +# pgtle.sh --extension EXTNAME [--pgtle-version VERSION] +# pgtle.sh 
--get-dir VERSION +# pgtle.sh --get-version +# pgtle.sh --run +# +# DESCRIPTION +# Generates pg_tle (Trusted Language Extensions) registration SQL from +# a pgxntool-based PostgreSQL extension. Reads the extension's .control +# file and SQL files, wrapping them for pg_tle deployment in managed +# environments like AWS RDS and Aurora. +# +# pg_tle enables extension installation without filesystem access by +# storing extension code in database tables. This script converts +# traditional PostgreSQL extensions into pg_tle-compatible SQL. +# +# OPTIONS +# --extension NAME +# Extension name (required). Must match a .control file basename +# in the current directory. +# +# --pgtle-version VERSION +# Generate for specific pg_tle version only (optional). +# Format: 1.0.0-1.4.0, 1.4.0-1.5.0, or 1.5.0+ +# Default: Generate all supported versions +# +# --get-dir VERSION +# Returns the directory path for the given pg_tle version. +# Format: VERSION is a version string like "1.5.2" +# Output: Directory path like "pg_tle/1.5.0+", "pg_tle/1.4.0-1.5.0", or "pg_tle/1.0.0-1.4.0" +# This option is used by make to determine which directory to use +# +# --get-version +# Returns the installed pg_tle version from the database. +# Output: Version string like "1.5.2" or empty if not installed +# Exit status: 0 if pg_tle is installed, 1 if not installed +# +# --run +# Runs the generated pg_tle registration SQL files. 
This option: +# - Detects the installed pg_tle version from the database +# - Determines the appropriate directory using --get-dir logic +# - Executes all SQL files in that directory via psql +# - Assumes PG* environment variables are configured for psql +# +# VERSION NOTATION +# X.Y.Z+ Works on pg_tle >= X.Y.Z +# X.Y.Z-A.B.C Works on pg_tle >= X.Y.Z and < A.B.C +# +# Note the boundary conditions: +# 1.5.0+ means >= 1.5.0 (includes 1.5.0) +# 1.4.0-1.5.0 means >= 1.4.0 and < 1.5.0 (excludes 1.5.0) +# 1.0.0-1.4.0 means >= 1.0.0 and < 1.4.0 (excludes 1.4.0) +# +# SUPPORTED VERSIONS +# 1.0.0-1.4.0 pg_tle 1.0.0 through 1.3.x (no uninstall function, no schema parameter) +# 1.4.0-1.5.0 pg_tle 1.4.0 through 1.4.x (has uninstall function, no schema parameter) +# 1.5.0+ pg_tle 1.5.0 and later (has uninstall function, schema parameter support) +# +# EXAMPLES +# # Generate all versions (default) +# pgtle.sh --extension myext +# +# # Generate only for pg_tle 1.5+ +# pgtle.sh --extension myext --pgtle-version 1.5.0+ +# +# # Get directory for a specific pg_tle version +# pgtle.sh --get-dir 1.5.2 +# # Output: pg_tle/1.5.0+ +# +# pgtle.sh --get-dir 1.4.2 +# # Output: pg_tle/1.4.0-1.5.0 +# +# # Get installed pg_tle version from database +# pgtle.sh --get-version +# # Output: 1.5.2 (or empty if not installed) +# +# # Run generated pg_tle registration SQL files +# pgtle.sh --run +# +# OUTPUT +# Creates files in version-specific subdirectories: +# pg_tle/1.0.0-1.4.0/{extension}.sql +# pg_tle/1.4.0-1.5.0/{extension}.sql +# pg_tle/1.5.0+/{extension}.sql +# +# Each file contains: +# - All versions of the extension +# - All upgrade paths between versions +# - Default version configuration +# - Complete installation instructions +# +# For --get-dir: Outputs the directory path to stdout. +# +# For --get-version: Outputs the installed pg_tle version to stdout, or empty if not installed. +# +# For --run: Executes SQL files and outputs progress messages to stderr. 
+# +# REQUIREMENTS +# - Must run from extension directory (where .control files are) +# - Extension must use only trusted languages (PL/pgSQL, SQL, PL/Perl, etc.) +# - No C code (module_pathname not supported by pg_tle) +# - Versioned SQL files must exist: sql/{ext}--{version}.sql +# +# EXIT STATUS +# 0 Success +# 1 Error (missing files, validation failure, C code detected, etc.) +# +# SEE ALSO +# pgxntool/README-pgtle.md - Complete user guide +# https://github.com/aws/pg_tle - pg_tle documentation +# + +set -eo pipefail + +# Source common library functions (error, die, debug) +PGXNTOOL_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$PGXNTOOL_DIR/lib.sh" + +# Constants +PGTLE_DELIMITER='$_pgtle_wrap_delimiter_$' +PGTLE_VERSIONS=("1.0.0-1.4.0" "1.4.0-1.5.0" "1.5.0+") + +# Supported pg_tle version ranges and their capabilities +# Use a function instead of associative array for compatibility with bash < 4.0 +get_pgtle_capability() { + local version="$1" + case "$version" in + "1.0.0-1.4.0") + echo "no_uninstall_no_schema" + ;; + "1.4.0-1.5.0") + echo "has_uninstall_no_schema" + ;; + "1.5.0+") + echo "has_uninstall_has_schema" + ;; + *) + echo "unknown" + ;; + esac +} + +# Global variables (populated from control file) +EXTENSION="" +DEFAULT_VERSION="" +COMMENT="" +REQUIRES="" +SCHEMA="" +MODULE_PATHNAME="" +VERSION_FILES=() +UPGRADE_FILES=() + +debug 30 "Global arrays initialized: VERSION_FILES=${#VERSION_FILES[@]}, UPGRADE_FILES=${#UPGRADE_FILES[@]}" +PGTLE_VERSION="" # Empty = generate all +GET_DIR_VERSION="" # For --get-dir option + +# Arrays (populated from SQL discovery) +VERSION_FILES=() +UPGRADE_FILES=() + +# Parse and validate a version string +# Extracts numeric version (major.minor.patch) from version strings +# Handles versions with suffixes like "1.5.0alpha1", "2.0beta", "1.2.3dev" +# Returns: numeric version string (e.g., "1.5.0") or exits with error +parse_version() { + local version="$1" + + if [ -z "$version" ]; then + die 1 "Version string is empty" + 
fi + + # Extract numeric version part (major.minor.patch) + # Matches: 1.5.0, 1.5, 10.2.1alpha, 2.0beta1, etc. + # Pattern: start of string, then digits, dot, digits, optionally (dot digits), then anything + local numeric_version + if [[ "$version" =~ ^([0-9]+\.[0-9]+(\.[0-9]+)?) ]]; then + numeric_version="${BASH_REMATCH[1]}" + else + die 1 "Cannot parse version string: '$version' + Expected format: major.minor[.patch][suffix] + Examples: 1.5.0, 1.5, 2.0alpha1, 10.2.3dev" + fi + + # Ensure we have at least major.minor (add .0 if needed) + if [[ ! "$numeric_version" =~ \. ]]; then + die 1 "Invalid version format: '$version' (need at least major.minor)" + fi + + # If we only have major.minor, add .0 for patch + if [[ ! "$numeric_version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + numeric_version="${numeric_version}.0" + fi + + echo "$numeric_version" +} + +# Convert version string to comparable integer +# Takes a numeric version string (major.minor.patch) and converts to integer +# Example: "1.5.0" -> 1005000 +# Encoding scheme: major * 1000000 + minor * 1000 + patch +# This limits each component to 0-999 to prevent overflow +version_to_number() { + local version="$1" + + # Parse major.minor.patch + local major minor patch + if [[ "$version" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then + major="${BASH_REMATCH[1]}" + minor="${BASH_REMATCH[2]}" + patch="${BASH_REMATCH[3]}" + else + die 1 "version_to_number: Invalid numeric version format: '$version'" + fi + + # Check for overflow in encoding scheme + # Each component must be < 1000 to fit in the allocated space + if [ "$major" -ge 1000 ]; then + die 1 "version_to_number: Major version too large: $major (max 999) + Version: $version" + fi + if [ "$minor" -ge 1000 ]; then + die 1 "version_to_number: Minor version too large: $minor (max 999) + Version: $version" + fi + if [ "$patch" -ge 1000 ]; then + die 1 "version_to_number: Patch version too large: $patch (max 999) + Version: $version" + fi + + # Convert to comparable 
number: major * 1000000 + minor * 1000 + patch + echo $(( major * 1000000 + minor * 1000 + patch )) +} + +# Get directory for a given pg_tle version +# Takes a version string like "1.5.2" and returns the directory path +# Handles versions with suffixes (e.g., "1.5.0alpha1") +# Returns: "pg_tle/1.0.0-1.4.0", "pg_tle/1.4.0-1.5.0", or "pg_tle/1.5.0+" +get_version_dir() { + local version="$1" + + if [ -z "$version" ]; then + die 1 "Version required for --get-dir (got empty string)" + fi + + # Parse and validate version + local numeric_version + numeric_version=$(parse_version "$version") + + # Check if the original version has a pre-release suffix + # Pre-release versions (alpha, beta, rc, dev) are considered BEFORE the release + # Example: 1.4.0alpha1 comes BEFORE 1.4.0, so it should use the 1.0.0-1.4.0 range + local has_prerelease=0 + if [[ "$version" =~ (alpha|beta|rc|dev) ]]; then + has_prerelease=1 + fi + + # Convert versions to comparable numbers + local version_num + local threshold_1_4_num + local threshold_1_5_num + version_num=$(version_to_number "$numeric_version") + threshold_1_4_num=$(version_to_number "1.4.0") + threshold_1_5_num=$(version_to_number "1.5.0") + + # Compare and return appropriate directory: + # < 1.4.0 -> 1.0.0-1.4.0 + # >= 1.4.0 and < 1.5.0 -> 1.4.0-1.5.0 + # >= 1.5.0 -> 1.5.0+ + # + # Special handling for pre-release versions: + # If version equals a threshold but has a pre-release suffix, treat it as less than that threshold + # Example: 1.4.0alpha1 is treated as < 1.4.0, so it uses 1.0.0-1.4.0 + if [ "$version_num" -lt "$threshold_1_4_num" ]; then + echo "pg_tle/1.0.0-1.4.0" + elif [ "$version_num" -eq "$threshold_1_4_num" ] && [ "$has_prerelease" -eq 1 ]; then + # Pre-release of 1.4.0 is considered < 1.4.0 + echo "pg_tle/1.0.0-1.4.0" + elif [ "$version_num" -lt "$threshold_1_5_num" ]; then + echo "pg_tle/1.4.0-1.5.0" + elif [ "$version_num" -eq "$threshold_1_5_num" ] && [ "$has_prerelease" -eq 1 ]; then + # Pre-release of 1.5.0 is 
considered < 1.5.0 + echo "pg_tle/1.4.0-1.5.0" + else + echo "pg_tle/1.5.0+" + fi +} + +# Get pg_tle version from installed extension +# Returns version string or empty if not installed +get_pgtle_version() { + psql --no-psqlrc --tuples-only --no-align --command "SELECT extversion FROM pg_extension WHERE extname = 'pg_tle';" 2>/dev/null | tr -d '[:space:]' || echo "" +} + +# Run pg_tle registration SQL files +# Detects installed pg_tle version and runs appropriate SQL files +run_pgtle_sql() { + echo "Running pg_tle registration SQL files..." >&2 + + # Get version from installed extension + local pgtle_version=$(get_pgtle_version) + if [ -z "$pgtle_version" ]; then + die 1 "pg_tle extension is not installed + Run 'CREATE EXTENSION pg_tle;' first, or use 'make check-pgtle' to verify" + fi + + # Get directory for this version + local pgtle_dir=$(get_version_dir "$pgtle_version") + if [ -z "$pgtle_dir" ]; then + die 1 "Failed to determine pg_tle directory for version $pgtle_version" + fi + + echo "Using pg_tle files for version $pgtle_version (directory: $pgtle_dir)" >&2 + + # Check if directory exists + if [ ! -d "$pgtle_dir" ]; then + die 1 "pg_tle directory $pgtle_dir does not exist + Run 'make pgtle' first to generate files" + fi + + # Run all SQL files in the directory + local sql_file + local found=0 + for sql_file in "$pgtle_dir"/*.sql; do + if [ -f "$sql_file" ]; then + found=1 + echo "Running $sql_file..." 
>&2 + psql --no-psqlrc --file="$sql_file" || exit 1 + fi + done + + if [ "$found" -eq 0 ]; then + die 1 "No SQL files found in $pgtle_dir + Run 'make pgtle' first to generate files" + fi + + echo "pg_tle registration complete" >&2 +} + +# Main logic +main() { + # Handle --get-dir, --get-version, --test-function, and --run options first (early exit, before other validation) + local args=("$@") + local i=0 + while [ $i -lt ${#args[@]} ]; do + if [ "${args[$i]}" = "--get-dir" ] && [ $((i+1)) -lt ${#args[@]} ]; then + get_version_dir "${args[$((i+1))]}" + exit 0 + elif [ "${args[$i]}" = "--get-version" ]; then + local version=$(get_pgtle_version) + if [ -n "$version" ]; then + echo "$version" + exit 0 + else + exit 1 + fi + elif [ "${args[$i]}" = "--test-function" ] && [ $((i+1)) -lt ${#args[@]} ]; then + # Hidden option for testing internal functions + # NOT a supported public interface - used only by the test suite + # Usage: pgtle.sh --test-function FUNC_NAME [ARGS...] + local func_name="${args[$((i+1))]}" + shift $((i+2)) # Remove script name and --test-function and func_name + + # Check if function exists + if ! declare -f "$func_name" >/dev/null 2>&1; then + die 1 "Function '$func_name' does not exist" + fi + + # Call the function with remaining arguments + "$func_name" "${args[@]:$((i+2))}" + exit $? 
+ elif [ "${args[$i]}" = "--run" ]; then + run_pgtle_sql + exit 0 + fi + i=$((i+1)) + done + + # Parse other arguments + parse_args "$@" + + validate_environment + parse_control_file + discover_sql_files + + if [ -z "$PGTLE_VERSION" ]; then + # Generate all versions + for version in "${PGTLE_VERSIONS[@]}"; do + generate_pgtle_sql "$version" + done + else + # Generate specific version + generate_pgtle_sql "$PGTLE_VERSION" + fi +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --extension) + EXTENSION="$2" + shift 2 + ;; + --pgtle-version) + PGTLE_VERSION="$2" + shift 2 + ;; + --get-dir) # This case should ideally not be hit due to early exit + GET_DIR_VERSION="$2" + shift 2 + ;; + --get-version) # This case should ideally not be hit due to early exit + shift + ;; + --test-function) # Hidden option for testing - not documented, not supported + shift 2 # Skip function name and --test-function + ;; + --run) # This case should ideally not be hit due to early exit + shift + ;; + *) + echo "Unknown option: $1" >&2 + exit 1 + ;; + esac + done + + if [ -z "$EXTENSION" ] && [ -z "$GET_DIR_VERSION" ]; then + die 1 "--extension is required (unless using --get-dir, --get-version, --test-function, or --run)" + fi +} + +validate_environment() { + # Check if control file exists + if [ ! 
-f "${EXTENSION}.control" ]; then + die 1 "Control file not found: ${EXTENSION}.control + Must run from extension directory" + fi +} + +parse_control_file() { + local control_file="${EXTENSION}.control" + + echo "Parsing control file: $control_file" >&2 + + # Parse key = value or key = 'value' format + while IFS= read -r line; do + # Skip comments and empty lines + [[ "$line" =~ ^[[:space:]]*# ]] && continue + [[ "$line" =~ ^[[:space:]]*$ ]] && continue + + # Extract key = value + if [[ "$line" =~ ^[[:space:]]*([a-z_]+)[[:space:]]*=[[:space:]]*(.*)[[:space:]]*$ ]]; then + local key="${BASH_REMATCH[1]}" + local value="${BASH_REMATCH[2]}" + + # Strip quotes if present (both single and double) + value="${value#\'}" + value="${value%\'}" + value="${value#\"}" + value="${value%\"}" + + # Trim trailing whitespace/comments + value="${value%%#*}" # Remove trailing comments + value="${value%% }" # Trim trailing spaces + + # Store in global variables + case "$key" in + default_version) DEFAULT_VERSION="$value" ;; + comment) COMMENT="$value" ;; + requires) REQUIRES="$value" ;; + schema) SCHEMA="$value" ;; + module_pathname) MODULE_PATHNAME="$value" ;; + esac + fi + done < "$control_file" + + # Validate required fields + if [ -z "$DEFAULT_VERSION" ]; then + die 1 "Control file missing default_version" + fi + + if [ -z "$COMMENT" ]; then + echo "WARNING: Control file missing comment, using extension name" >&2 + COMMENT="$EXTENSION extension" + fi + + # Warn about C code + if [ -n "$MODULE_PATHNAME" ]; then + cat >&2 <<-EOF + WARNING: Extension uses module_pathname (C code) + pg_tle only supports trusted languages (PL/pgSQL, SQL, etc.) 
+ Generated SQL will likely not work + EOF + fi + + echo " default_version: $DEFAULT_VERSION" >&2 + echo " comment: $COMMENT" >&2 + if [ -n "$REQUIRES" ]; then + echo " requires: $REQUIRES" >&2 + fi + if [ -n "$SCHEMA" ]; then + echo " schema: $SCHEMA" >&2 + fi +} + +discover_sql_files() { + echo "Discovering SQL files for extension: $EXTENSION" >&2 + debug 30 "discover_sql_files: Starting discovery for extension: $EXTENSION" + + # Ensure default_version file exists and has content if base file exists + # This handles the case where make all hasn't generated it yet, or it exists but is empty + local default_version_file="sql/${EXTENSION}--${DEFAULT_VERSION}.sql" + local base_file="sql/${EXTENSION}.sql" + if [ -f "$base_file" ] && ([ ! -f "$default_version_file" ] || [ ! -s "$default_version_file" ]); then + debug 30 "discover_sql_files: Creating default_version file from base file" + cp "$base_file" "$default_version_file" + fi + + # Find versioned files: sql/{ext}--{version}.sql + # Use find to get proper null-delimited output, then filter out upgrade scripts + VERSION_FILES=() # Reset array + debug 30 "discover_sql_files: Reset VERSION_FILES array" + while IFS= read -r -d '' file; do + local basename=$(basename "$file" .sql) + local dash_count=$(echo "$basename" | grep -o -- "--" | wc -l | tr -d '[:space:]') + # Skip upgrade scripts (they have 2 dashes) + if [ "$dash_count" -ne 1 ]; then + continue + fi + # Error on empty version files + if [ ! -s "$file" ]; then + die 1 "Empty version file found: $file" + fi + VERSION_FILES+=("$file") + done < <(find sql/ -maxdepth 1 -name "${EXTENSION}--*.sql" -print0 2>/dev/null | sort -zV) + + # Find upgrade scripts: sql/{ext}--{ver1}--{ver2}.sql + # These have TWO occurrences of "--" in the filename + UPGRADE_FILES=() # Reset array + debug 30 "discover_sql_files: Reset UPGRADE_FILES array" + while IFS= read -r -d '' file; do + # Error on empty upgrade files + if [ ! 
-s "$file" ]; then + die 1 "Empty upgrade file found: $file" + fi + local basename=$(basename "$file" .sql) + local dash_count=$(echo "$basename" | grep -o -- "--" | wc -l | tr -d '[:space:]') + if [ "$dash_count" -eq 2 ]; then + UPGRADE_FILES+=("$file") + fi + done < <(find sql/ -maxdepth 1 -name "${EXTENSION}--*--*.sql" -print0 2>/dev/null | sort -zV) + + if [ ${#VERSION_FILES[@]} -eq 0 ]; then + die 1 "No versioned SQL files found for $EXTENSION + Expected pattern: sql/${EXTENSION}--{version}.sql + Run 'make' first to generate versioned files from sql/${EXTENSION}.sql" + fi + + echo " Found ${#VERSION_FILES[@]} version file(s):" >&2 + for f in "${VERSION_FILES[@]}"; do + echo " - $f" >&2 + done + + debug 30 "discover_sql_files: Checking UPGRADE_FILES array, count=${#UPGRADE_FILES[@]:-0}" + if [ ${#UPGRADE_FILES[@]:-0} -gt 0 ]; then + echo " Found ${#UPGRADE_FILES[@]} upgrade script(s):" >&2 + debug 30 "discover_sql_files: Iterating over ${#UPGRADE_FILES[@]} upgrade files" + for f in "${UPGRADE_FILES[@]}"; do + echo " - $f" >&2 + done + else + debug 30 "discover_sql_files: No upgrade files found" + fi +} + +extract_version_from_filename() { + local filename="$1" + local basename=$(basename "$filename" .sql) + + # Match patterns: + # - ext--1.0.0 → FROM_VERSION=1.0.0, TO_VERSION="" + # - ext--1.0.0--2.0.0 → FROM_VERSION=1.0.0, TO_VERSION=2.0.0 + + if [[ "$basename" =~ ^${EXTENSION}--([0-9][0-9.]*)(--([0-9][0-9.]*))?$ ]]; then + FROM_VERSION="${BASH_REMATCH[1]}" + TO_VERSION="${BASH_REMATCH[3]}" # Empty for non-upgrade files + return 0 + else + die 1 "Cannot parse version from filename: $filename + Expected format: ${EXTENSION}--{version}.sql or ${EXTENSION}--{ver1}--{ver2}.sql" + fi +} + +validate_delimiter() { + local sql_file="$1" + + if grep -qF "$PGTLE_DELIMITER" "$sql_file"; then + die 1 "SQL file contains reserved pg_tle delimiter: $sql_file + Found: $PGTLE_DELIMITER + This delimiter is used internally by pgtle.sh to wrap SQL content. 
+ You must modify your SQL to not contain this string. If this poses a + serious problem, please open an issue at https://github.com/decibel/pgxntool/issues" + fi +} + +wrap_sql_content() { + local sql_file="$1" + + validate_delimiter "$sql_file" + + # Output wrapped SQL with proper indentation + echo " ${PGTLE_DELIMITER}" + cat "$sql_file" + echo " ${PGTLE_DELIMITER}" +} + +build_requires_array() { + # Input: "plpgsql, other_ext, another" + # Output: 'plpgsql', 'other_ext', 'another' + + # Split on comma, trim whitespace, quote each element + REQUIRES_ARRAY=$(echo "$REQUIRES" | \ + sed 's/[[:space:]]*,[[:space:]]*/\n/g' | \ + sed "s/^[[:space:]]*//;s/[[:space:]]*$//" | \ + sed "s/^/'/;s/$/'/" | \ + paste -sd, -) +} + +generate_header() { + local pgtle_version="$1" + local output_file="$2" + local version_count=${#VERSION_FILES[@]:-0} + local upgrade_count=${#UPGRADE_FILES[@]:-0} + + # Determine version compatibility message + local compat_msg + if [[ "$pgtle_version" == *"+"* ]]; then + local base_version="${pgtle_version%+}" + compat_msg="-- Works on pg_tle >= ${base_version}" + else + local min_version="${pgtle_version%-*}" + local max_version="${pgtle_version#*-}" + compat_msg="-- Works on pg_tle >= ${min_version} and < ${max_version}" + fi + + cat < $to_ver" + echo "SELECT pgtle.install_update_path(" + echo " '${EXTENSION}'," + echo " '${from_ver}'," + echo " '${to_ver}'," + wrap_sql_content "$upgrade_file" + echo ");" + echo +} + +generate_pgtle_sql() { + local pgtle_version="$1" + debug 30 "generate_pgtle_sql: Starting for version $pgtle_version, extension $EXTENSION" + + # Get capability using function (compatible with bash < 4.0) + local capability=$(get_pgtle_capability "$pgtle_version") + local version_dir="pg_tle/${pgtle_version}" + local output_file="${version_dir}/${EXTENSION}.sql" + + # Ensure arrays are initialized (defensive programming) + # Arrays should already be initialized at top level, but ensure they exist + debug 30 "generate_pgtle_sql: 
Checking array initialization" + debug 30 "generate_pgtle_sql: VERSION_FILES is ${VERSION_FILES+set}, count=${#VERSION_FILES[@]:-0}" + debug 30 "generate_pgtle_sql: UPGRADE_FILES is ${UPGRADE_FILES+set}, count=${#UPGRADE_FILES[@]:-0}" + + if [ -z "${VERSION_FILES+set}" ]; then + echo "WARNING: VERSION_FILES not set, initializing" >&2 + VERSION_FILES=() + fi + if [ -z "${UPGRADE_FILES+set}" ]; then + echo "WARNING: UPGRADE_FILES not set, initializing" >&2 + UPGRADE_FILES=() + fi + + # Create version-specific output directory if needed + mkdir -p "$version_dir" + + echo "Generating: $output_file (pg_tle $pgtle_version)" >&2 + + # Generate SQL to file + { + generate_header "$pgtle_version" "$output_file" + + cat < "$output_file" + + echo " ✓ Generated: $output_file" >&2 +} + +main "$@" + diff --git a/pgtle_versions.md b/pgtle_versions.md new file mode 100644 index 0000000..d2c5c03 --- /dev/null +++ b/pgtle_versions.md @@ -0,0 +1,47 @@ +# pg_tle Version Support Matrix + +This file documents pg_tle version boundaries that affect pgxntool's pg_tle support code. Each boundary represents a backward-incompatible API change. 
+ +## Version Ranges (pgxntool notation) + +### 1.0.0-1.4.0 +- **pg_tle versions:** 1.0.0 through 1.3.x +- **PostgreSQL support:** 11-17 +- **API:** No `pgtle.uninstall_extension()` function, no schema parameter +- **Features:** Basic extension management, custom data types, authentication hooks + +### 1.4.0-1.5.0 +- **pg_tle versions:** 1.4.0 through 1.4.x +- **PostgreSQL support:** 11-17 +- **API:** Added `pgtle.uninstall_extension()` function, no schema parameter +- **Features:** Custom alignment/storage, enhanced warnings + +### 1.5.0+ +- **pg_tle versions:** 1.5.0 and later (tested through 1.5.2) +- **PostgreSQL support:** 12-18 (dropped PG 11) +- **API:** BREAKING CHANGE - `pgtle.install_extension()` now requires schema parameter +- **Features:** Schema parameter support in installation + +## Key API Changes by Version + +**1.4.0:** Added `pgtle.uninstall_extension()` +- Versions before 1.4.0 cannot uninstall extensions + +**1.5.0:** Changed `pgtle.install_extension()` signature +- Added required `schema` parameter +- Dropped PostgreSQL 11 support + +## Version Notation + +- `X.Y.Z+` - Works on pg_tle >= X.Y.Z +- `X.Y.Z-A.B.C` - Works on pg_tle >= X.Y.Z and < A.B.C + +**Boundary conditions:** +- `1.5.0+` means >= 1.5.0 (includes 1.5.0) +- `1.4.0-1.5.0` means >= 1.4.0 and < 1.5.0 (excludes 1.5.0) +- `1.0.0-1.4.0` means >= 1.0.0 and < 1.4.0 (excludes 1.4.0) + +## For Complete Details + +- `pgtle.sh` (comments at top) +- https://github.com/aws/pg_tle diff --git a/setup.sh b/setup.sh index 881ccaa..08751f1 100755 --- a/setup.sh +++ b/setup.sh @@ -3,6 +3,10 @@ set -o errexit -o errtrace -o pipefail trap 'echo "Error on line ${LINENO}"' ERR +# Source common library functions (error, die, debug) +PGXNTOOL_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$PGXNTOOL_DIR/lib.sh" + [ -d .git ] || git init if ! 
git diff --cached --exit-code; then From dbe7b50193b547177b53fbbde4ef3e903ac2eb15 Mon Sep 17 00:00:00 2001 From: jnasbyupgrade Date: Thu, 22 Jan 2026 16:16:01 -0600 Subject: [PATCH 22/22] Update .gitignore to reflect pgxntool changes --- .gitignore | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 8e63124..1fb95a8 100644 --- a/.gitignore +++ b/.gitignore @@ -5,21 +5,21 @@ test/dump/*.log # Editor files .*.swp +# Claude Code local settings +.claude/*.local.json + # Explicitly exclude META.json! !/META.json # Generated make files meta.mk +control.mk # Compiler output *.o *.so .deps/ -# built targets -/sql/*--* -!/sql/*--*--*.sql - # Test artifacts results/ regression.diffs @@ -28,4 +28,6 @@ regression.out # Misc tmp/ .DS_Store -.claude/settings.local.json + +# pg_tle generated files +/pg_tle/