diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 33bba98d5c5..59d9544e598 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -103,6 +103,18 @@ RUN mkdir .pgenv-staging/ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf +FROM base AS pg17 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 17beta2 +RUN rm .pgenv/src/*.tar* +RUN make -C .pgenv/src/postgresql-*/ clean +RUN make -C .pgenv/src/postgresql-*/src/include install + +# create a staging directory with all files we want to copy from our pgenv build +# we will copy the contents of the staged folder into the final image at once +RUN mkdir .pgenv-staging/ +RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ +RUN rm .pgenv-staging/config/default.conf + FROM base AS uncrustify-builder RUN sudo apt update && sudo apt install -y cmake tree diff --git a/.gitattributes b/.gitattributes index 42f42cd25b9..c7c03e1efda 100644 --- a/.gitattributes +++ b/.gitattributes @@ -29,6 +29,7 @@ src/backend/distributed/deparser/ruleutils_13.c -citus-style src/backend/distributed/deparser/ruleutils_14.c -citus-style src/backend/distributed/deparser/ruleutils_15.c -citus-style src/backend/distributed/deparser/ruleutils_16.c -citus-style +src/backend/distributed/deparser/ruleutils_17.c -citus-style src/backend/distributed/commands/index_pg_source.c -citus-style src/include/distributed/citus_nodes.h -citus-style diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 70bc0bcb9a6..f8e6cfefff9 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -32,11 +32,12 @@ jobs: style_checker_image_name: "ghcr.io/citusdata/stylechecker" style_checker_tools_version: "0.8.18" sql_snapshot_pg_version: "16.3" - image_suffix: "-v13fd57c" + image_suffix: "-dev-16d4616" pg14_version: '{ "major": "14", "full": "14.12" }' pg15_version: '{ "major": "15", "full": "15.7" }' pg16_version: '{ "major": "16", "full": "16.3" }' - upgrade_pg_versions: "14.12-15.7-16.3" + pg17_version: '{ "major": "17", "full": "17beta2" }' + upgrade_pg_versions: "14.12-15.7-16.3-17beta2" steps: # Since GHA jobs needs at least one step we use a noop step here. 
- name: Set up parameters @@ -113,6 +114,7 @@ jobs: - ${{ needs.params.outputs.pg14_version }} - ${{ needs.params.outputs.pg15_version }} - ${{ needs.params.outputs.pg16_version }} + - ${{ needs.params.outputs.pg17_version }} runs-on: ubuntu-20.04 container: image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}" @@ -144,6 +146,7 @@ jobs: - ${{ needs.params.outputs.pg14_version }} - ${{ needs.params.outputs.pg15_version }} - ${{ needs.params.outputs.pg16_version }} + - ${{ needs.params.outputs.pg17_version }} make: - check-split - check-multi @@ -173,6 +176,10 @@ jobs: pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-failure + pg_version: ${{ needs.params.outputs.pg17_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-enterprise-failure pg_version: ${{ needs.params.outputs.pg14_version }} suite: regress @@ -185,6 +192,10 @@ jobs: pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-enterprise-failure + pg_version: ${{ needs.params.outputs.pg17_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-pytest pg_version: ${{ needs.params.outputs.pg14_version }} suite: regress @@ -197,6 +208,10 @@ jobs: pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-pytest + pg_version: ${{ needs.params.outputs.pg17_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: installcheck suite: cdc image_name: ${{ needs.params.outputs.test_image_name }} @@ -205,6 +220,10 @@ jobs: suite: cdc image_name: ${{ needs.params.outputs.test_image_name }} pg_version: ${{ needs.params.outputs.pg16_version }} + - make: installcheck + suite: cdc + image_name: ${{ needs.params.outputs.test_image_name }} + pg_version: ${{ needs.params.outputs.pg17_version }} - make: check-query-generator pg_version: ${{ needs.params.outputs.pg14_version }} suite: regress @@ -217,6 +236,10 @@ jobs: pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-query-generator + pg_version: ${{ needs.params.outputs.pg17_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} runs-on: ubuntu-20.04 container: image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}" @@ -260,6 +283,7 @@ jobs: - ${{ needs.params.outputs.pg14_version }} - ${{ needs.params.outputs.pg15_version }} - ${{ needs.params.outputs.pg16_version }} + - ${{ needs.params.outputs.pg17_version }} parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs steps: - uses: actions/checkout@v4 @@ -308,6 +332,10 @@ jobs: new_pg_major: 16 - old_pg_major: 14 new_pg_major: 16 + - old_pg_major: 16 + new_pg_major: 17 + - old_pg_major: 15 + new_pg_major: 17 env: old_pg_major: ${{ matrix.old_pg_major }} new_pg_major: ${{ matrix.new_pg_major }} @@ -391,7 +419,7 @@ jobs: CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }} runs-on: ubuntu-20.04 container: - image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }} + image: ${{ needs.params.outputs.test_image_name 
}}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }} needs: - params - test-citus @@ -502,7 +530,7 @@ jobs: name: Test flakyness runs-on: ubuntu-20.04 container: - image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }} + image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }} options: --user root env: runs: 8 diff --git a/configure b/configure index a0c978deaad..e4ea28d7d86 100755 --- a/configure +++ b/configure @@ -2588,7 +2588,7 @@ fi if test "$with_pg_version_check" = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5 $as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;} -elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16'; then +elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5 else { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5 diff --git a/configure.ac b/configure.ac index 2a4c7a21ae8..1f4633ccaaa 100644 --- a/configure.ac +++ b/configure.ac @@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check) if test "$with_pg_version_check" = no; then AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)]) -elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16'; then +elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.]) else AC_MSG_NOTICE([building against PostgreSQL $version_num]) diff --git a/src/backend/columnar/columnar_customscan.c b/src/backend/columnar/columnar_customscan.c index 9ed82a5bfec..5288b8096b7 100644 --- a/src/backend/columnar/columnar_customscan.c +++ b/src/backend/columnar/columnar_customscan.c @@ -363,7 +363,7 @@ ColumnarGetRelationInfoHook(PlannerInfo *root, Oid relationObjectId, /* disable index-only scan */ IndexOptInfo *indexOptInfo = NULL; - foreach_ptr(indexOptInfo, rel->indexlist) + foreach_declared_ptr(indexOptInfo, rel->indexlist) { memset(indexOptInfo->canreturn, false, indexOptInfo->ncolumns * sizeof(bool)); } @@ -381,7 +381,7 @@ RemovePathsByPredicate(RelOptInfo *rel, PathPredicate removePathPredicate) List *filteredPathList = NIL; Path *path = NULL; - foreach_ptr(path, rel->pathlist) + foreach_declared_ptr(path, rel->pathlist) { if (!removePathPredicate(path)) { @@ -428,7 +428,7 @@ static void CostColumnarPaths(PlannerInfo *root, RelOptInfo *rel, Oid relationId) { Path *path = NULL; - foreach_ptr(path, rel->pathlist) + foreach_declared_ptr(path, rel->pathlist) { if (IsA(path, IndexPath)) { @@ -783,7 +783,7 @@ ExtractPushdownClause(PlannerInfo *root, RelOptInfo *rel, Node *node) List *pushdownableArgs = NIL; Node *boolExprArg = NULL; - foreach_ptr(boolExprArg, boolExpr->args) + foreach_declared_ptr(boolExprArg, boolExpr->args) { Expr *pushdownableArg = ExtractPushdownClause(root, rel, (Node *) boolExprArg); @@ -1550,7 +1550,7 @@ ColumnarPerStripeScanCost(RelOptInfo *rel, Oid relationId, int numberOfColumnsRe uint32 maxColumnCount = 0; uint64 totalStripeSize = 0; 
StripeMetadata *stripeMetadata = NULL; - foreach_ptr(stripeMetadata, stripeList) + foreach_declared_ptr(stripeMetadata, stripeList) { totalStripeSize += stripeMetadata->dataLength; maxColumnCount = Max(maxColumnCount, stripeMetadata->columnCount); @@ -1924,11 +1924,6 @@ ColumnarScan_EndCustomScan(CustomScanState *node) */ TableScanDesc scanDesc = node->ss.ss_currentScanDesc; - /* - * Free the exprcontext - */ - ExecFreeExprContext(&node->ss.ps); - /* * clean out the tuple table */ diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c index 215f9609109..192c4cc4bac 100644 --- a/src/backend/columnar/columnar_metadata.c +++ b/src/backend/columnar/columnar_metadata.c @@ -2041,7 +2041,7 @@ GetHighestUsedRowNumber(uint64 storageId) List *stripeMetadataList = ReadDataFileStripeList(storageId, GetTransactionSnapshot()); StripeMetadata *stripeMetadata = NULL; - foreach_ptr(stripeMetadata, stripeMetadataList) + foreach_declared_ptr(stripeMetadata, stripeMetadataList) { highestRowNumber = Max(highestRowNumber, StripeGetHighestRowNumber(stripeMetadata)); diff --git a/src/backend/columnar/columnar_reader.c b/src/backend/columnar/columnar_reader.c index 7ef0d15d7da..65ef27617d8 100644 --- a/src/backend/columnar/columnar_reader.c +++ b/src/backend/columnar/columnar_reader.c @@ -880,7 +880,7 @@ ReadChunkGroupNextRow(ChunkGroupReadState *chunkGroupReadState, Datum *columnVal memset(columnNulls, true, sizeof(bool) * chunkGroupReadState->columnCount); int attno; - foreach_int(attno, chunkGroupReadState->projectedColumnList) + foreach_declared_int(attno, chunkGroupReadState->projectedColumnList) { const ChunkData *chunkGroupData = chunkGroupReadState->chunkGroupData; const int rowIndex = chunkGroupReadState->currentRow; @@ -1489,7 +1489,7 @@ ProjectedColumnMask(uint32 columnCount, List *projectedColumnList) bool *projectedColumnMask = palloc0(columnCount * sizeof(bool)); int attno; - foreach_int(attno, projectedColumnList) + foreach_declared_int(attno, projectedColumnList) { /* attno is 1-indexed; projectedColumnMask is 0-indexed */ int columnIndex = attno - 1; diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index ca3a5f4c4aa..148ccb50710 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -1424,15 +1424,32 @@ ConditionalLockRelationWithTimeout(Relation rel, LOCKMODE lockMode, int timeout, static bool -columnar_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno, +columnar_scan_analyze_next_block(TableScanDesc scan, +#if PG_VERSION_NUM >= PG_VERSION_17 + ReadStream *stream) +#else + BlockNumber blockno, BufferAccessStrategy bstrategy) +#endif { /* * Our access method is not pages based, i.e. tuples are not confined * to pages boundaries. So not much to do here. We return true anyway * so acquire_sample_rows() in analyze.c would call our * columnar_scan_analyze_next_tuple() callback. + * In PG17, we return false in case there is no buffer left, since + * the outer loop changed in acquire_sample_rows(), and it is + * expected for the scan_analyze_next_block function to check whether + * there are any blocks left in the block sampler. 
*/ +#if PG_VERSION_NUM >= PG_VERSION_17 + Buffer buf = read_stream_next_buffer(stream, NULL); + if (!BufferIsValid(buf)) + { + return false; + } + ReleaseBuffer(buf); +#endif return true; } @@ -2239,7 +2256,9 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions "Specify SET ACCESS METHOD before storage parameters, or use separate ALTER TABLE commands."))); } - destIsColumnar = (strcmp(alterTableCmd->name, COLUMNAR_AM_NAME) == 0); + destIsColumnar = (strcmp(alterTableCmd->name ? alterTableCmd->name : + default_table_access_method, + COLUMNAR_AM_NAME) == 0); if (srcIsColumnar && !destIsColumnar) { @@ -3083,7 +3102,7 @@ DefElem * GetExtensionOption(List *extensionOptions, const char *defname) { DefElem *defElement = NULL; - foreach_ptr(defElement, extensionOptions) + foreach_declared_ptr(defElement, extensionOptions) { if (IsA(defElement, DefElem) && strncmp(defElement->defname, defname, NAMEDATALEN) == 0) diff --git a/src/backend/distributed/cdc/cdc_decoder.c b/src/backend/distributed/cdc/cdc_decoder.c index cf9f4963b72..1e71a82a1c1 100644 --- a/src/backend/distributed/cdc/cdc_decoder.c +++ b/src/backend/distributed/cdc/cdc_decoder.c @@ -22,6 +22,8 @@ #include "utils/rel.h" #include "utils/typcache.h" +#include "pg_version_constants.h" + PG_MODULE_MAGIC; extern void _PG_output_plugin_init(OutputPluginCallbacks *cb); @@ -435,6 +437,74 @@ TranslateChangesIfSchemaChanged(Relation sourceRelation, Relation targetRelation return; } +#if PG_VERSION_NUM >= PG_VERSION_17 + + /* Check the ReorderBufferChange's action type and handle them accordingly.*/ + switch (change->action) + { + case REORDER_BUFFER_CHANGE_INSERT: + { + /* For insert action, only the new tuple needs to be translated */ + HeapTuple sourceRelationNewTuple = change->data.tp.newtuple; + HeapTuple targetRelationNewTuple = GetTupleForTargetSchemaForCdc( + sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc); + change->data.tp.newtuple = targetRelationNewTuple; + break; + } + + /* + * For update changes both old and new tuples need to be translated for target relation + * if the REPLICA IDENTITY is set to FULL. Otherwise, only the new tuple needs to be + * translated for target relation. + */ + case REORDER_BUFFER_CHANGE_UPDATE: + { + /* For update action, the new tuple should always be translated */ + /* Get the new tuple from the ReorderBufferChange, and translate it to target relation. */ + HeapTuple sourceRelationNewTuple = change->data.tp.newtuple; + HeapTuple targetRelationNewTuple = GetTupleForTargetSchemaForCdc( + sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc); + change->data.tp.newtuple = targetRelationNewTuple; + + /* + * Format oldtuple according to the target relation. If the column values of replica + * identity change, then the old tuple is non-null and needs to be formatted according + * to the target relation schema.
+ */ + if (change->data.tp.oldtuple != NULL) + { + HeapTuple sourceRelationOldTuple = change->data.tp.oldtuple; + HeapTuple targetRelationOldTuple = GetTupleForTargetSchemaForCdc( + sourceRelationOldTuple, + sourceRelationDesc, + targetRelationDesc); + + change->data.tp.oldtuple = targetRelationOldTuple; + } + break; + } + + case REORDER_BUFFER_CHANGE_DELETE: + { + /* For delete action, only old tuple should be translated*/ + HeapTuple sourceRelationOldTuple = change->data.tp.oldtuple; + HeapTuple targetRelationOldTuple = GetTupleForTargetSchemaForCdc( + sourceRelationOldTuple, + sourceRelationDesc, + targetRelationDesc); + + change->data.tp.oldtuple = targetRelationOldTuple; + break; + } + + default: + { + /* Do nothing for other action types. */ + break; + } + } +#else + /* Check the ReorderBufferChange's action type and handle them accordingly.*/ switch (change->action) { @@ -499,4 +569,5 @@ TranslateChangesIfSchemaChanged(Relation sourceRelation, Relation targetRelation break; } } +#endif } diff --git a/src/backend/distributed/clock/causal_clock.c b/src/backend/distributed/clock/causal_clock.c index eb4b8d9d362..e2f12b79c48 100644 --- a/src/backend/distributed/clock/causal_clock.c +++ b/src/backend/distributed/clock/causal_clock.c @@ -328,7 +328,7 @@ GetHighestClockInTransaction(List *nodeConnectionList) { MultiConnection *connection = NULL; - foreach_ptr(connection, nodeConnectionList) + foreach_declared_ptr(connection, nodeConnectionList) { int querySent = SendRemoteCommand(connection, "SELECT citus_get_node_clock();"); @@ -349,7 +349,7 @@ GetHighestClockInTransaction(List *nodeConnectionList) globalClockValue->counter))); /* fetch the results and pick the highest clock value of all the nodes */ - foreach_ptr(connection, nodeConnectionList) + foreach_declared_ptr(connection, nodeConnectionList) { bool raiseInterrupts = true; diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index 030dbbe7869..d2f8348dadb 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -414,7 +414,7 @@ UndistributeTables(List *relationIdList) */ List *originalForeignKeyRecreationCommands = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { List *fkeyCommandsForRelation = GetFKeyCreationCommandsRelationInvolvedWithTableType(relationId, @@ -802,7 +802,7 @@ ConvertTableInternal(TableConversionState *con) List *partitionList = PartitionList(con->relationId); Oid partitionRelationId = InvalidOid; - foreach_oid(partitionRelationId, partitionList) + foreach_declared_oid(partitionRelationId, partitionList) { char *tableQualifiedName = generate_qualified_relation_name( partitionRelationId); @@ -873,7 +873,7 @@ ConvertTableInternal(TableConversionState *con) } TableDDLCommand *tableCreationCommand = NULL; - foreach_ptr(tableCreationCommand, preLoadCommands) + foreach_declared_ptr(tableCreationCommand, preLoadCommands) { Assert(CitusIsA(tableCreationCommand, TableDDLCommand)); @@ -947,7 +947,7 @@ ConvertTableInternal(TableConversionState *con) con->suppressNoticeMessages); TableDDLCommand *tableConstructionCommand = NULL; - foreach_ptr(tableConstructionCommand, postLoadCommands) + foreach_declared_ptr(tableConstructionCommand, postLoadCommands) { Assert(CitusIsA(tableConstructionCommand, TableDDLCommand)); char *tableConstructionSQL = GetTableDDLCommand(tableConstructionCommand); @@ -965,7 +965,7 @@ 
ConvertTableInternal(TableConversionState *con) MemoryContext oldContext = MemoryContextSwitchTo(citusPerPartitionContext); char *attachPartitionCommand = NULL; - foreach_ptr(attachPartitionCommand, attachPartitionCommands) + foreach_declared_ptr(attachPartitionCommand, attachPartitionCommands) { MemoryContextReset(citusPerPartitionContext); @@ -990,7 +990,7 @@ ConvertTableInternal(TableConversionState *con) /* For now we only support cascade to colocation for alter_distributed_table UDF */ Assert(con->conversionType == ALTER_DISTRIBUTED_TABLE); - foreach_oid(colocatedTableId, con->colocatedTableList) + foreach_declared_oid(colocatedTableId, con->colocatedTableList) { if (colocatedTableId == con->relationId) { @@ -1018,7 +1018,7 @@ ConvertTableInternal(TableConversionState *con) if (con->cascadeToColocated != CASCADE_TO_COLOCATED_NO_ALREADY_CASCADED) { char *foreignKeyCommand = NULL; - foreach_ptr(foreignKeyCommand, foreignKeyCommands) + foreach_declared_ptr(foreignKeyCommand, foreignKeyCommands) { ExecuteQueryViaSPI(foreignKeyCommand, SPI_OK_UTILITY); } @@ -1054,7 +1054,7 @@ CopyTableConversionReturnIntoCurrentContext(TableConversionReturn *tableConversi tableConversionReturnCopy = palloc0(sizeof(TableConversionReturn)); List *copyForeignKeyCommands = NIL; char *foreignKeyCommand = NULL; - foreach_ptr(foreignKeyCommand, tableConversionReturn->foreignKeyCommands) + foreach_declared_ptr(foreignKeyCommand, tableConversionReturn->foreignKeyCommands) { char *copyForeignKeyCommand = MemoryContextStrdup(CurrentMemoryContext, foreignKeyCommand); @@ -1129,7 +1129,7 @@ DropIndexesNotSupportedByColumnar(Oid relationId, bool suppressNoticeMessages) RelationClose(columnarRelation); Oid indexId = InvalidOid; - foreach_oid(indexId, indexIdList) + foreach_declared_oid(indexId, indexIdList) { char *indexAmName = GetIndexAccessMethodName(indexId); if (extern_ColumnarSupportsIndexAM(indexAmName)) @@ -1389,7 +1389,7 @@ CreateTableConversion(TableConversionParameters *params) * since they will be handled separately. 
*/ Oid colocatedTableId = InvalidOid; - foreach_oid(colocatedTableId, colocatedTableList) + foreach_declared_oid(colocatedTableId, colocatedTableList) { if (PartitionTable(colocatedTableId)) { @@ -1605,7 +1605,7 @@ DoesCascadeDropUnsupportedObject(Oid classId, Oid objectId, HTAB *nodeMap) targetObjectId); HeapTuple depTup = NULL; - foreach_ptr(depTup, dependencyTupleList) + foreach_declared_ptr(depTup, dependencyTupleList) { Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup); @@ -1645,7 +1645,7 @@ GetViewCreationCommandsOfTable(Oid relationId) List *commands = NIL; Oid viewOid = InvalidOid; - foreach_oid(viewOid, views) + foreach_declared_oid(viewOid, views) { StringInfo query = makeStringInfo(); @@ -1683,7 +1683,7 @@ WrapTableDDLCommands(List *commandStrings) List *tableDDLCommands = NIL; char *command = NULL; - foreach_ptr(command, commandStrings) + foreach_declared_ptr(command, commandStrings) { tableDDLCommands = lappend(tableDDLCommands, makeTableDDLCommandString(command)); } @@ -1840,7 +1840,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, */ List *ownedSequences = getOwnedSequences_internal(sourceId, 0, DEPENDENCY_AUTO); Oid sequenceOid = InvalidOid; - foreach_oid(sequenceOid, ownedSequences) + foreach_declared_oid(sequenceOid, ownedSequences) { changeDependencyFor(RelationRelationId, sequenceOid, RelationRelationId, sourceId, targetId); @@ -1873,7 +1873,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, } char *justBeforeDropCommand = NULL; - foreach_ptr(justBeforeDropCommand, justBeforeDropCommands) + foreach_declared_ptr(justBeforeDropCommand, justBeforeDropCommands) { ExecuteQueryViaSPI(justBeforeDropCommand, SPI_OK_UTILITY); } @@ -1987,7 +1987,7 @@ CheckAlterDistributedTableConversionParameters(TableConversionState *con) Oid colocatedTableOid = InvalidOid; text *colocateWithText = cstring_to_text(con->colocateWith); Oid colocateWithTableOid = ResolveRelationId(colocateWithText, false); - foreach_oid(colocatedTableOid, con->colocatedTableList) + foreach_declared_oid(colocatedTableOid, con->colocatedTableList) { if (colocateWithTableOid == colocatedTableOid) { @@ -2214,7 +2214,7 @@ WillRecreateForeignKeyToReferenceTable(Oid relationId, { List *colocatedTableList = ColocatedTableList(relationId); Oid colocatedTableOid = InvalidOid; - foreach_oid(colocatedTableOid, colocatedTableList) + foreach_declared_oid(colocatedTableOid, colocatedTableList) { if (HasForeignKeyToReferenceTable(colocatedTableOid)) { @@ -2242,7 +2242,7 @@ WarningsForDroppingForeignKeysWithDistributedTables(Oid relationId) List *foreignKeys = list_concat(referencingForeingKeys, referencedForeignKeys); Oid foreignKeyOid = InvalidOid; - foreach_oid(foreignKeyOid, foreignKeys) + foreach_declared_oid(foreignKeyOid, foreignKeys) { ereport(WARNING, (errmsg("foreign key %s will be dropped", get_constraint_name(foreignKeyOid)))); diff --git a/src/backend/distributed/commands/begin.c b/src/backend/distributed/commands/begin.c index b19b044849d..3b5728868f2 100644 --- a/src/backend/distributed/commands/begin.c +++ b/src/backend/distributed/commands/begin.c @@ -33,7 +33,7 @@ SaveBeginCommandProperties(TransactionStmt *transactionStmt) * * While BEGIN can be quite frequent it will rarely have options set. 
*/ - foreach_ptr(item, transactionStmt->options) + foreach_declared_ptr(item, transactionStmt->options) { A_Const *constant = (A_Const *) item->arg; diff --git a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c index c88367462bd..02b175960ad 100644 --- a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c +++ b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c @@ -168,7 +168,7 @@ GetPartitionRelationIds(List *relationIdList) List *partitionRelationIdList = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { if (PartitionTable(relationId)) { @@ -189,7 +189,7 @@ LockRelationsWithLockMode(List *relationIdList, LOCKMODE lockMode) { Oid relationId; relationIdList = SortList(relationIdList, CompareOids); - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { LockRelationOid(relationId, lockMode); } @@ -207,7 +207,7 @@ static void ErrorIfConvertingMultiLevelPartitionedTable(List *relationIdList) { Oid relationId; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { if (PartitionedTable(relationId) && PartitionTable(relationId)) { @@ -236,7 +236,7 @@ void ErrorIfAnyPartitionRelationInvolvedInNonInheritedFKey(List *relationIdList) { Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { if (!PartitionTable(relationId)) { @@ -300,7 +300,7 @@ bool RelationIdListHasReferenceTable(List *relationIdList) { Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { if (IsCitusTableType(relationId, REFERENCE_TABLE)) { @@ -322,7 +322,7 @@ GetFKeyCreationCommandsForRelationIdList(List *relationIdList) List *fKeyCreationCommands = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { List *relationFKeyCreationCommands = GetReferencingForeignConstaintCommands(relationId); @@ -342,7 +342,7 @@ static void DropRelationIdListForeignKeys(List *relationIdList, int fKeyFlags) { Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { DropRelationForeignKeys(relationId, fKeyFlags); } @@ -399,7 +399,7 @@ GetRelationDropFkeyCommands(Oid relationId, int fKeyFlags) List *relationFKeyIdList = GetForeignKeyOids(relationId, fKeyFlags); Oid foreignKeyId; - foreach_oid(foreignKeyId, relationFKeyIdList) + foreach_declared_oid(foreignKeyId, relationFKeyIdList) { char *dropFkeyCascadeCommand = GetDropFkeyCascadeCommand(foreignKeyId); dropFkeyCascadeCommandList = lappend(dropFkeyCascadeCommandList, @@ -450,7 +450,7 @@ ExecuteCascadeOperationForRelationIdList(List *relationIdList, cascadeOperationType) { Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { /* * The reason behind skipping certain table types in below loop is @@ -531,7 +531,7 @@ ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandL PG_TRY(); { char *utilityCommand = NULL; - foreach_ptr(utilityCommand, utilityCommandList) + foreach_declared_ptr(utilityCommand, utilityCommandList) { /* * CREATE MATERIALIZED VIEW commands need to be parsed/transformed, @@ -569,7 
+569,7 @@ void ExecuteAndLogUtilityCommandList(List *utilityCommandList) { char *utilityCommand = NULL; - foreach_ptr(utilityCommand, utilityCommandList) + foreach_declared_ptr(utilityCommand, utilityCommandList) { ExecuteAndLogUtilityCommand(utilityCommand); } @@ -597,7 +597,7 @@ void ExecuteForeignKeyCreateCommandList(List *ddlCommandList, bool skip_validation) { char *ddlCommand = NULL; - foreach_ptr(ddlCommand, ddlCommandList) + foreach_declared_ptr(ddlCommand, ddlCommandList) { ExecuteForeignKeyCreateCommand(ddlCommand, skip_validation); } diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c index 93f1e7d28ec..dfc57f096ae 100644 --- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c +++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c @@ -588,7 +588,7 @@ ErrorIfOptionListHasNoTableName(List *optionList) { char *table_nameString = "table_name"; DefElem *option = NULL; - foreach_ptr(option, optionList) + foreach_declared_ptr(option, optionList) { char *optionName = option->defname; if (strcmp(optionName, table_nameString) == 0) @@ -613,7 +613,7 @@ ForeignTableDropsTableNameOption(List *optionList) { char *table_nameString = "table_name"; DefElem *option = NULL; - foreach_ptr(option, optionList) + foreach_declared_ptr(option, optionList) { char *optionName = option->defname; DefElemAction optionAction = option->defaction; @@ -732,7 +732,7 @@ UpdateAutoConvertedForConnectedRelations(List *relationIds, bool autoConverted) List *relationIdList = NIL; Oid relid = InvalidOid; - foreach_oid(relid, relationIds) + foreach_declared_oid(relid, relationIds) { List *connectedRelations = GetForeignKeyConnectedRelationIdList(relid); relationIdList = list_concat_unique_oid(relationIdList, connectedRelations); @@ -740,7 +740,7 @@ UpdateAutoConvertedForConnectedRelations(List *relationIds, bool autoConverted) relationIdList = SortList(relationIdList, CompareOids); - foreach_oid(relid, relationIdList) + foreach_declared_oid(relid, relationIdList) { UpdatePgDistPartitionAutoConverted(relid, autoConverted); } @@ -776,7 +776,7 @@ GetShellTableDDLEventsForCitusLocalTable(Oid relationId) List *shellTableDDLEvents = NIL; TableDDLCommand *tableDDLCommand = NULL; - foreach_ptr(tableDDLCommand, tableDDLCommands) + foreach_declared_ptr(tableDDLCommand, tableDDLCommands) { Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); shellTableDDLEvents = lappend(shellTableDDLEvents, @@ -863,7 +863,7 @@ RenameShardRelationConstraints(Oid shardRelationId, uint64 shardId) List *constraintNameList = GetConstraintNameList(shardRelationId); char *constraintName = NULL; - foreach_ptr(constraintName, constraintNameList) + foreach_declared_ptr(constraintName, constraintNameList) { const char *commandString = GetRenameShardConstraintCommand(shardRelationId, constraintName, shardId); @@ -958,7 +958,7 @@ RenameShardRelationIndexes(Oid shardRelationId, uint64 shardId) List *indexOidList = GetExplicitIndexOidList(shardRelationId); Oid indexOid = InvalidOid; - foreach_oid(indexOid, indexOidList) + foreach_declared_oid(indexOid, indexOidList) { const char *commandString = GetRenameShardIndexCommand(indexOid, shardId); ExecuteAndLogUtilityCommand(commandString); @@ -1008,7 +1008,7 @@ RenameShardRelationStatistics(Oid shardRelationId, uint64 shardId) List *statsCommandList = GetRenameStatsCommandList(statsOidList, shardId); char *command = NULL; - foreach_ptr(command, statsCommandList) + 
foreach_declared_ptr(command, statsCommandList) { ExecuteAndLogUtilityCommand(command); } @@ -1044,7 +1044,7 @@ RenameShardRelationNonTruncateTriggers(Oid shardRelationId, uint64 shardId) List *triggerIdList = GetExplicitTriggerIdList(shardRelationId); Oid triggerId = InvalidOid; - foreach_oid(triggerId, triggerIdList) + foreach_declared_oid(triggerId, triggerIdList) { bool missingOk = false; HeapTuple triggerTuple = GetTriggerTupleById(triggerId, missingOk); @@ -1097,7 +1097,7 @@ DropRelationTruncateTriggers(Oid relationId) List *triggerIdList = GetExplicitTriggerIdList(relationId); Oid triggerId = InvalidOid; - foreach_oid(triggerId, triggerIdList) + foreach_declared_oid(triggerId, triggerIdList) { bool missingOk = false; HeapTuple triggerTuple = GetTriggerTupleById(triggerId, missingOk); @@ -1175,7 +1175,7 @@ DropIdentitiesOnTable(Oid relationId) relation_close(relation, NoLock); char *dropCommand = NULL; - foreach_ptr(dropCommand, dropCommandList) + foreach_declared_ptr(dropCommand, dropCommandList) { /* * We need to disable/enable ddl propagation for this command, to prevent @@ -1218,7 +1218,7 @@ DropViewsOnTable(Oid relationId) List *reverseOrderedViews = ReversedOidList(views); Oid viewId = InvalidOid; - foreach_oid(viewId, reverseOrderedViews) + foreach_declared_oid(viewId, reverseOrderedViews) { char *qualifiedViewName = generate_qualified_relation_name(viewId); @@ -1241,7 +1241,7 @@ ReversedOidList(List *oidList) { List *reversed = NIL; Oid oid = InvalidOid; - foreach_oid(oid, oidList) + foreach_declared_oid(oid, oidList) { reversed = lcons_oid(oid, reversed); } @@ -1293,7 +1293,7 @@ GetRenameStatsCommandList(List *statsOidList, uint64 shardId) { List *statsCommandList = NIL; Oid statsOid; - foreach_oid(statsOid, statsOidList) + foreach_declared_oid(statsOid, statsOidList) { HeapTuple tup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statsOid)); diff --git a/src/backend/distributed/commands/cluster.c b/src/backend/distributed/commands/cluster.c index 7a1dac30256..44a1b610919 100644 --- a/src/backend/distributed/commands/cluster.c +++ b/src/backend/distributed/commands/cluster.c @@ -115,7 +115,7 @@ static bool IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt) { DefElem *opt = NULL; - foreach_ptr(opt, clusterStmt->params) + foreach_declared_ptr(opt, clusterStmt->params) { if (strcmp(opt->defname, "verbose") == 0) { diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index 5ce3d1436cc..3f78bf4b6e3 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -132,6 +132,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati char *schemaName = get_namespace_name(collnamespace); *quotedCollationName = quote_qualified_identifier(schemaName, collname); const char *providerString = + collprovider == COLLPROVIDER_BUILTIN ? "builtin" : collprovider == COLLPROVIDER_DEFAULT ? "default" : collprovider == COLLPROVIDER_ICU ? "icu" : collprovider == COLLPROVIDER_LIBC ? 
"libc" : NULL; diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c index 347a99e8af4..de05efe45ca 100644 --- a/src/backend/distributed/commands/common.c +++ b/src/backend/distributed/commands/common.c @@ -235,7 +235,7 @@ PreprocessDropDistributedObjectStmt(Node *node, const char *queryString, List *distributedObjects = NIL; List *distributedObjectAddresses = NIL; Node *object = NULL; - foreach_ptr(object, stmt->objects) + foreach_declared_ptr(object, stmt->objects) { /* TODO understand if the lock should be sth else */ Relation rel = NULL; /* not used, but required to pass to get_object_address */ @@ -267,7 +267,7 @@ PreprocessDropDistributedObjectStmt(Node *node, const char *queryString, * remove the entries for the distributed objects on dropping */ ObjectAddress *address = NULL; - foreach_ptr(address, distributedObjectAddresses) + foreach_declared_ptr(address, distributedObjectAddresses) { UnmarkObjectDistributed(address); } @@ -303,7 +303,7 @@ DropTextSearchDictObjectAddress(Node *node, bool missing_ok, bool isPostprocess) List *objectAddresses = NIL; List *objNameList = NIL; - foreach_ptr(objNameList, stmt->objects) + foreach_declared_ptr(objNameList, stmt->objects) { Oid tsdictOid = get_ts_dict_oid(objNameList, missing_ok); @@ -328,7 +328,7 @@ DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, bool isPostproces List *objectAddresses = NIL; List *objNameList = NIL; - foreach_ptr(objNameList, stmt->objects) + foreach_declared_ptr(objNameList, stmt->objects) { Oid tsconfigOid = get_ts_config_oid(objNameList, missing_ok); diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 8c59aa19908..7af6f2dd0da 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -834,7 +834,7 @@ HashSplitPointsForShardList(List *shardList) List *splitPointList = NIL; ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardList) + foreach_declared_ptr(shardInterval, shardList) { int32 shardMaxValue = DatumGetInt32(shardInterval->maxValue); @@ -890,7 +890,7 @@ WorkerNodesForShardList(List *shardList) List *nodeIdList = NIL; ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardList) + foreach_declared_ptr(shardInterval, shardList) { WorkerNode *workerNode = ActiveShardPlacementWorkerNode(shardInterval->shardId); nodeIdList = lappend_int(nodeIdList, workerNode->nodeId); @@ -1337,7 +1337,7 @@ CreateCitusTable(Oid relationId, CitusTableType tableType, ALLOCSET_DEFAULT_SIZES); MemoryContext oldContext = MemoryContextSwitchTo(citusPartitionContext); - foreach_oid(partitionRelationId, partitionList) + foreach_declared_oid(partitionRelationId, partitionList) { MemoryContextReset(citusPartitionContext); @@ -1551,7 +1551,7 @@ ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType, MemoryContext oldContext = MemoryContextSwitchTo(citusPartitionContext); Oid partitionRelationId = InvalidOid; - foreach_oid(partitionRelationId, partitionList) + foreach_declared_oid(partitionRelationId, partitionList) { MemoryContextReset(citusPartitionContext); @@ -1701,7 +1701,7 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId Oid attrDefOid; List *attrDefOids = GetAttrDefsFromSequence(seqOid); - foreach_oid(attrDefOid, attrDefOids) + foreach_declared_oid(attrDefOid, attrDefOids) { ObjectAddress columnAddress = 
GetAttrDefaultColumnAddress(attrDefOid); @@ -1783,7 +1783,7 @@ static void EnsureDistributedSequencesHaveOneType(Oid relationId, List *seqInfoList) { SequenceInfo *seqInfo = NULL; - foreach_ptr(seqInfo, seqInfoList) + foreach_declared_ptr(seqInfo, seqInfoList) { if (!seqInfo->isNextValDefault) { diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 5479a59edcf..d916b16a0bc 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -235,7 +235,7 @@ FilterDistributedDatabases(List *databases) { List *distributedDatabases = NIL; String *databaseName = NULL; - foreach_ptr(databaseName, databases) + foreach_declared_ptr(databaseName, databases) { bool missingOk = true; ObjectAddress *dbAddress = @@ -258,7 +258,7 @@ static bool IsSetTablespaceStatement(AlterDatabaseStmt *stmt) { DefElem *def = NULL; - foreach_ptr(def, stmt->options) + foreach_declared_ptr(def, stmt->options) { if (strcmp(def->defname, "tablespace") == 0) { @@ -510,7 +510,7 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString, List *remoteNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock); WorkerNode *remoteNode = NULL; - foreach_ptr(remoteNode, remoteNodes) + foreach_declared_ptr(remoteNode, remoteNodes) { InsertCleanupRecordOutsideTransaction( CLEANUP_OBJECT_DATABASE, @@ -733,7 +733,7 @@ void EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt) { DefElem *option = NULL; - foreach_ptr(option, stmt->options) + foreach_declared_ptr(option, stmt->options) { if (strcmp(option->defname, "oid") == 0) { diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index c7de5d874b7..e1e77a7eb62 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -162,7 +162,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target, } ObjectAddress *object = NULL; - foreach_ptr(object, objectsToBeCreated) + foreach_declared_ptr(object, objectsToBeCreated) { List *dependencyCommands = GetDependencyCreateDDLCommands(object); ddlCommands = list_concat(ddlCommands, dependencyCommands); @@ -201,7 +201,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target, */ List *addressSortedDependencies = SortList(objectsWithCommands, ObjectAddressComparator); - foreach_ptr(object, addressSortedDependencies) + foreach_declared_ptr(object, addressSortedDependencies) { LockDatabaseObject(object->classId, object->objectId, object->objectSubId, ExclusiveLock); @@ -240,7 +240,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target, else { WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, remoteNodeList) + foreach_declared_ptr(workerNode, remoteNodeList) { const char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; @@ -256,7 +256,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target, * that objects have been created on remote nodes before marking them * distributed, so MarkObjectDistributed wouldn't fail. 
*/ - foreach_ptr(object, objectsWithCommands) + foreach_declared_ptr(object, objectsWithCommands) { /* * pg_dist_object entries must be propagated with the super user, since @@ -279,7 +279,7 @@ void EnsureAllObjectDependenciesExistOnAllNodes(const List *targets) { ObjectAddress *target = NULL; - foreach_ptr(target, targets) + foreach_declared_ptr(target, targets) { EnsureDependenciesExistOnAllNodes(target); } @@ -336,7 +336,7 @@ DeferErrorIfCircularDependencyExists(const ObjectAddress *objectAddress) List *dependencies = GetAllDependenciesForObject(objectAddress); ObjectAddress *dependency = NULL; - foreach_ptr(dependency, dependencies) + foreach_declared_ptr(dependency, dependencies) { if (dependency->classId == objectAddress->classId && dependency->objectId == objectAddress->objectId && @@ -424,7 +424,7 @@ GetDistributableDependenciesForObject(const ObjectAddress *target) /* filter the ones that can be distributed */ ObjectAddress *dependency = NULL; - foreach_ptr(dependency, dependencies) + foreach_declared_ptr(dependency, dependencies) { /* * TODO: maybe we can optimize the logic applied in below line. Actually we @@ -508,7 +508,7 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) INCLUDE_IDENTITY, creatingShellTableOnRemoteNode); TableDDLCommand *tableDDLCommand = NULL; - foreach_ptr(tableDDLCommand, tableDDLCommands) + foreach_declared_ptr(tableDDLCommand, tableDDLCommands) { Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); commandList = lappend(commandList, GetTableDDLCommand( @@ -683,7 +683,7 @@ GetAllDependencyCreateDDLCommands(const List *dependencies) List *commands = NIL; ObjectAddress *dependency = NULL; - foreach_ptr(dependency, dependencies) + foreach_declared_ptr(dependency, dependencies) { commands = list_concat(commands, GetDependencyCreateDDLCommands(dependency)); } @@ -831,7 +831,7 @@ bool ShouldPropagateAnyObject(List *addresses) { ObjectAddress *address = NULL; - foreach_ptr(address, addresses) + foreach_declared_ptr(address, addresses) { if (ShouldPropagateObject(address)) { @@ -853,7 +853,7 @@ FilterObjectAddressListByPredicate(List *objectAddressList, AddressPredicate pre List *result = NIL; ObjectAddress *address = NULL; - foreach_ptr(address, objectAddressList) + foreach_declared_ptr(address, objectAddressList) { if (predicate(address)) { diff --git a/src/backend/distributed/commands/domain.c b/src/backend/distributed/commands/domain.c index 82ef80c0f4b..d62428ce439 100644 --- a/src/backend/distributed/commands/domain.c +++ b/src/backend/distributed/commands/domain.c @@ -210,7 +210,7 @@ MakeCollateClauseFromOid(Oid collationOid) getObjectIdentityParts(&collateAddress, &objName, &objArgs, false); char *name = NULL; - foreach_ptr(name, objName) + foreach_declared_ptr(name, objName) { collateClause->collname = lappend(collateClause->collname, makeString(name)); } diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 8d4c6431b77..17f9ff57512 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -274,7 +274,7 @@ PreprocessDropExtensionStmt(Node *node, const char *queryString, /* unmark each distributed extension */ ObjectAddress *address = NULL; - foreach_ptr(address, distributedExtensionAddresses) + foreach_declared_ptr(address, distributedExtensionAddresses) { UnmarkObjectDistributed(address); } @@ -313,7 +313,7 @@ FilterDistributedExtensions(List *extensionObjectList) List *extensionNameList = NIL; String *objectName = NULL; - 
foreach_ptr(objectName, extensionObjectList) + foreach_declared_ptr(objectName, extensionObjectList) { const char *extensionName = strVal(objectName); const bool missingOk = true; @@ -351,7 +351,7 @@ ExtensionNameListToObjectAddressList(List *extensionObjectList) List *extensionObjectAddressList = NIL; String *objectName; - foreach_ptr(objectName, extensionObjectList) + foreach_declared_ptr(objectName, extensionObjectList) { /* * We set missingOk to false as we assume all the objects in @@ -527,7 +527,7 @@ MarkExistingObjectDependenciesDistributedIfSupported() List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); Oid citusTableId = InvalidOid; - foreach_oid(citusTableId, citusTableIdList) + foreach_declared_oid(citusTableId, citusTableIdList) { if (!ShouldMarkRelationDistributed(citusTableId)) { @@ -571,7 +571,7 @@ MarkExistingObjectDependenciesDistributedIfSupported() */ List *viewList = GetAllViews(); Oid viewOid = InvalidOid; - foreach_oid(viewOid, viewList) + foreach_declared_oid(viewOid, viewList) { if (!ShouldMarkRelationDistributed(viewOid)) { @@ -605,7 +605,7 @@ MarkExistingObjectDependenciesDistributedIfSupported() List *distributedObjectAddressList = GetDistributedObjectAddressList(); ObjectAddress *distributedObjectAddress = NULL; - foreach_ptr(distributedObjectAddress, distributedObjectAddressList) + foreach_declared_ptr(distributedObjectAddress, distributedObjectAddressList) { List *distributableDependencyObjectAddresses = GetDistributableDependenciesForObject(distributedObjectAddress); @@ -627,7 +627,7 @@ MarkExistingObjectDependenciesDistributedIfSupported() SetLocalEnableMetadataSync(false); ObjectAddress *objectAddress = NULL; - foreach_ptr(objectAddress, uniqueObjectAddresses) + foreach_declared_ptr(objectAddress, uniqueObjectAddresses) { MarkObjectDistributed(objectAddress); } @@ -831,7 +831,7 @@ IsDropCitusExtensionStmt(Node *parseTree) /* now that we have a DropStmt, check if citus extension is among the objects to dropped */ String *objectName; - foreach_ptr(objectName, dropStmt->objects) + foreach_declared_ptr(objectName, dropStmt->objects) { const char *extensionName = strVal(objectName); @@ -1061,7 +1061,7 @@ GenerateGrantCommandsOnExtensionDependentFDWs(Oid extensionId) List *FDWOids = GetDependentFDWsToExtension(extensionId); Oid FDWOid = InvalidOid; - foreach_oid(FDWOid, FDWOids) + foreach_declared_oid(FDWOid, FDWOids) { Acl *aclEntry = GetPrivilegesForFDW(FDWOid); diff --git a/src/backend/distributed/commands/foreign_constraint.c b/src/backend/distributed/commands/foreign_constraint.c index 2f60c3fb11f..b7162b1a493 100644 --- a/src/backend/distributed/commands/foreign_constraint.c +++ b/src/backend/distributed/commands/foreign_constraint.c @@ -202,7 +202,7 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis List *foreignKeyOids = GetForeignKeyOids(referencingTableId, flags); Oid foreignKeyOid = InvalidOid; - foreach_oid(foreignKeyOid, foreignKeyOids) + foreach_declared_oid(foreignKeyOid, foreignKeyOids) { HeapTuple heapTuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(foreignKeyOid)); @@ -414,7 +414,7 @@ ForeignKeySetsNextValColumnToDefault(HeapTuple pgConstraintTuple) List *setDefaultAttrs = ForeignKeyGetDefaultingAttrs(pgConstraintTuple); AttrNumber setDefaultAttr = InvalidAttrNumber; - foreach_int(setDefaultAttr, setDefaultAttrs) + foreach_declared_int(setDefaultAttr, setDefaultAttrs) { if (ColumnDefaultsToNextVal(pgConstraintForm->conrelid, setDefaultAttr)) { @@ -727,7 +727,7 @@ 
ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId) GetForeignKeyIdsForColumn(columnName, relationId, searchForeignKeyColumnFlags); Oid foreignKeyId = InvalidOid; - foreach_oid(foreignKeyId, foreignKeyIdsColumnAppeared) + foreach_declared_oid(foreignKeyId, foreignKeyIdsColumnAppeared) { Oid referencedTableId = GetReferencedTableId(foreignKeyId); if (IsCitusTableType(referencedTableId, REFERENCE_TABLE)) @@ -901,7 +901,7 @@ GetForeignConstraintCommandsInternal(Oid relationId, int flags) int saveNestLevel = PushEmptySearchPath(); Oid foreignKeyOid = InvalidOid; - foreach_oid(foreignKeyOid, foreignKeyOids) + foreach_declared_oid(foreignKeyOid, foreignKeyOids) { char *statementDef = pg_get_constraintdef_command(foreignKeyOid); @@ -1157,7 +1157,7 @@ static Oid FindForeignKeyOidWithName(List *foreignKeyOids, const char *inputConstraintName) { Oid foreignKeyOid = InvalidOid; - foreach_oid(foreignKeyOid, foreignKeyOids) + foreach_declared_oid(foreignKeyOid, foreignKeyOids) { char *constraintName = get_constraint_name(foreignKeyOid); @@ -1472,7 +1472,7 @@ RelationInvolvedInAnyNonInheritedForeignKeys(Oid relationId) List *foreignKeysRelationInvolved = list_concat(referencingForeignKeys, referencedForeignKeys); Oid foreignKeyId = InvalidOid; - foreach_oid(foreignKeyId, foreignKeysRelationInvolved) + foreach_declared_oid(foreignKeyId, foreignKeysRelationInvolved) { HeapTuple heapTuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(foreignKeyId)); if (!HeapTupleIsValid(heapTuple)) diff --git a/src/backend/distributed/commands/foreign_data_wrapper.c b/src/backend/distributed/commands/foreign_data_wrapper.c index a181e63a733..e095f9894c4 100644 --- a/src/backend/distributed/commands/foreign_data_wrapper.c +++ b/src/backend/distributed/commands/foreign_data_wrapper.c @@ -86,7 +86,7 @@ static bool NameListHasFDWOwnedByDistributedExtension(List *FDWNames) { String *FDWValue = NULL; - foreach_ptr(FDWValue, FDWNames) + foreach_declared_ptr(FDWValue, FDWNames) { /* captures the extension address during lookup */ ObjectAddress *extensionAddress = palloc0(sizeof(ObjectAddress)); diff --git a/src/backend/distributed/commands/foreign_server.c b/src/backend/distributed/commands/foreign_server.c index d2e5755643d..096690e2b04 100644 --- a/src/backend/distributed/commands/foreign_server.c +++ b/src/backend/distributed/commands/foreign_server.c @@ -229,7 +229,7 @@ RecreateForeignServerStmt(Oid serverId) int location = -1; DefElem *option = NULL; - foreach_ptr(option, server->options) + foreach_declared_ptr(option, server->options) { DefElem *copyOption = makeDefElem(option->defname, option->arg, location); createStmt->options = lappend(createStmt->options, copyOption); @@ -247,7 +247,7 @@ static bool NameListHasDistributedServer(List *serverNames) { String *serverValue = NULL; - foreach_ptr(serverValue, serverNames) + foreach_declared_ptr(serverValue, serverNames) { List *addresses = GetObjectAddressByServerName(strVal(serverValue), false); diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 6d2dd0ba975..b2b3484e6f4 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -256,7 +256,7 @@ create_distributed_function(PG_FUNCTION_ARGS) createFunctionSQL, alterFunctionOwnerSQL); List *grantDDLCommands = GrantOnFunctionDDLCommands(funcOid); char *grantOnFunctionSQL = NULL; - foreach_ptr(grantOnFunctionSQL, grantDDLCommands) + foreach_declared_ptr(grantOnFunctionSQL, grantDDLCommands) { 
appendStringInfo(&ddlCommand, ";%s", grantOnFunctionSQL); } @@ -370,7 +370,7 @@ ErrorIfAnyNodeDoesNotHaveMetadata(void) ActivePrimaryNonCoordinatorNodeList(ShareLock); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { if (!workerNode->hasMetadata) { @@ -1476,7 +1476,7 @@ CreateFunctionStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) objectWithArgs->objname = stmt->funcname; FunctionParameter *funcParam = NULL; - foreach_ptr(funcParam, stmt->parameters) + foreach_declared_ptr(funcParam, stmt->parameters) { if (ShouldAddFunctionSignature(funcParam->mode)) { @@ -1519,7 +1519,7 @@ DefineAggregateStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess if (stmt->args != NIL) { FunctionParameter *funcParam = NULL; - foreach_ptr(funcParam, linitial(stmt->args)) + foreach_declared_ptr(funcParam, linitial(stmt->args)) { objectWithArgs->objargs = lappend(objectWithArgs->objargs, funcParam->argType); @@ -1528,7 +1528,7 @@ DefineAggregateStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess else { DefElem *defItem = NULL; - foreach_ptr(defItem, stmt->definition) + foreach_declared_ptr(defItem, stmt->definition) { /* * If no explicit args are given, pg includes basetype in the signature. @@ -1933,7 +1933,7 @@ static void ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt) { DefElem *action = NULL; - foreach_ptr(action, stmt->actions) + foreach_declared_ptr(action, stmt->actions) { if (strcmp(action->defname, "set") == 0) { @@ -2040,7 +2040,7 @@ PreprocessGrantOnFunctionStmt(Node *node, const char *queryString, List *grantFunctionList = NIL; ObjectAddress *functionAddress = NULL; - foreach_ptr(functionAddress, distributedFunctions) + foreach_declared_ptr(functionAddress, distributedFunctions) { ObjectWithArgs *distFunction = ObjectWithArgsFromOid( functionAddress->objectId); @@ -2083,7 +2083,7 @@ PostprocessGrantOnFunctionStmt(Node *node, const char *queryString) } ObjectAddress *functionAddress = NULL; - foreach_ptr(functionAddress, distributedFunctions) + foreach_declared_ptr(functionAddress, distributedFunctions) { EnsureAllObjectDependenciesExistOnAllNodes(list_make1(functionAddress)); } @@ -2120,7 +2120,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt) /* iterate over all namespace names provided to get their oid's */ String *namespaceValue = NULL; - foreach_ptr(namespaceValue, grantStmt->objects) + foreach_declared_ptr(namespaceValue, grantStmt->objects) { char *nspname = strVal(namespaceValue); bool missing_ok = false; @@ -2132,7 +2132,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt) * iterate over all distributed functions to filter the ones * that belong to one of the namespaces from above */ - foreach_ptr(distributedFunction, distributedFunctionList) + foreach_declared_ptr(distributedFunction, distributedFunctionList) { Oid namespaceOid = get_func_namespace(distributedFunction->objectId); @@ -2151,7 +2151,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt) { bool missingOk = false; ObjectWithArgs *objectWithArgs = NULL; - foreach_ptr(objectWithArgs, grantStmt->objects) + foreach_declared_ptr(objectWithArgs, grantStmt->objects) { ObjectAddress *functionAddress = palloc0(sizeof(ObjectAddress)); functionAddress->classId = ProcedureRelationId; diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index e97312df271..f4943ebde61 100644 --- a/src/backend/distributed/commands/index.c +++ 
b/src/backend/distributed/commands/index.c @@ -337,7 +337,7 @@ ExecuteFunctionOnEachTableIndex(Oid relationId, PGIndexProcessor pgIndexProcesso List *indexIdList = RelationGetIndexList(relation); Oid indexId = InvalidOid; - foreach_oid(indexId, indexIdList) + foreach_declared_oid(indexId, indexIdList) { HeapTuple indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexId)); if (!HeapTupleIsValid(indexTuple)) @@ -708,7 +708,7 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand, /* check if any of the indexes being dropped belong to a distributed table */ List *objectNameList = NULL; - foreach_ptr(objectNameList, dropIndexStatement->objects) + foreach_declared_ptr(objectNameList, dropIndexStatement->objects) { struct DropRelationCallbackState state; uint32 rvrFlags = RVR_MISSING_OK; @@ -880,7 +880,7 @@ ErrorIfUnsupportedAlterIndexStmt(AlterTableStmt *alterTableStatement) /* error out if any of the subcommands are unsupported */ List *commandList = alterTableStatement->cmds; AlterTableCmd *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { AlterTableType alterTableType = command->subtype; @@ -932,7 +932,7 @@ CreateIndexTaskList(IndexStmt *indexStmt) LockShardListMetadata(shardIntervalList, ShareLock); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; @@ -977,7 +977,7 @@ CreateReindexTaskList(Oid relationId, ReindexStmt *reindexStmt) LockShardListMetadata(shardIntervalList, ShareLock); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; @@ -1226,7 +1226,7 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement) Var *partitionKey = DistPartitionKeyOrError(relationId); List *indexParameterList = createIndexStatement->indexParams; IndexElem *indexElement = NULL; - foreach_ptr(indexElement, indexParameterList) + foreach_declared_ptr(indexElement, indexParameterList) { const char *columnName = indexElement->name; @@ -1295,7 +1295,7 @@ DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt) LockShardListMetadata(shardIntervalList, ShareLock); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; char *shardIndexName = pstrdup(indexName); diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index cb64ef7f55a..573d49a324a 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -1957,7 +1957,7 @@ ShardIntervalListHasLocalPlacements(List *shardIntervalList) { int32 localGroupId = GetLocalGroupId(); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { if (ActiveShardPlacementOnGroup(localGroupId, shardInterval->shardId) != NULL) { @@ -2452,7 +2452,7 @@ ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement) bool appendToShardSet = false; DefElem *defel = NULL; - foreach_ptr(defel, copyStatement->options) + foreach_declared_ptr(defel, copyStatement->options) { if (strncmp(defel->defname, APPEND_TO_SHARD_OPTION, NAMEDATALEN) == 0) { diff --git a/src/backend/distributed/commands/non_main_db_distribute_object_ops.c 
b/src/backend/distributed/commands/non_main_db_distribute_object_ops.c index b777936d3e4..fdd29b1e18a 100644 --- a/src/backend/distributed/commands/non_main_db_distribute_object_ops.c +++ b/src/backend/distributed/commands/non_main_db_distribute_object_ops.c @@ -255,7 +255,7 @@ static void DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt) { RoleSpec *roleSpec = NULL; - foreach_ptr(roleSpec, dropRoleStmt->roles) + foreach_declared_ptr(roleSpec, dropRoleStmt->roles) { Oid roleOid = get_role_oid(roleSpec->rolename, dropRoleStmt->missing_ok); diff --git a/src/backend/distributed/commands/policy.c b/src/backend/distributed/commands/policy.c index a2a926b6677..97292e29d92 100644 --- a/src/backend/distributed/commands/policy.c +++ b/src/backend/distributed/commands/policy.c @@ -48,7 +48,7 @@ CreatePolicyCommands(Oid relationId) List *policyList = GetPolicyListForRelation(relationId); RowSecurityPolicy *policy; - foreach_ptr(policy, policyList) + foreach_declared_ptr(policy, policyList) { char *createPolicyCommand = CreatePolicyCommandForPolicy(relationId, policy); commands = lappend(commands, makeTableDDLCommandString(createPolicyCommand)); @@ -88,7 +88,7 @@ GetPolicyListForRelation(Oid relationId) List *policyList = NIL; RowSecurityPolicy *policy; - foreach_ptr(policy, relation->rd_rsdesc->policies) + foreach_declared_ptr(policy, relation->rd_rsdesc->policies) { policyList = lappend(policyList, policy); } @@ -310,7 +310,7 @@ GetPolicyByName(Oid relationId, const char *policyName) List *policyList = GetPolicyListForRelation(relationId); RowSecurityPolicy *policy = NULL; - foreach_ptr(policy, policyList) + foreach_declared_ptr(policy, policyList) { if (strncmp(policy->policy_name, policyName, NAMEDATALEN) == 0) { diff --git a/src/backend/distributed/commands/publication.c b/src/backend/distributed/commands/publication.c index c1cfd5e7732..16dbc9171bf 100644 --- a/src/backend/distributed/commands/publication.c +++ b/src/backend/distributed/commands/publication.c @@ -158,7 +158,7 @@ BuildCreatePublicationStmt(Oid publicationId) List *schemaIds = GetPublicationSchemas(publicationId); Oid schemaId = InvalidOid; - foreach_oid(schemaId, schemaIds) + foreach_declared_oid(schemaId, schemaIds) { char *schemaName = get_namespace_name(schemaId); @@ -181,7 +181,7 @@ BuildCreatePublicationStmt(Oid publicationId) /* mainly for consistent ordering in test output */ relationIds = SortList(relationIds, CompareOids); - foreach_oid(relationId, relationIds) + foreach_declared_oid(relationId, relationIds) { #if (PG_VERSION_NUM >= PG_VERSION_15) bool tableOnly = false; @@ -414,7 +414,7 @@ GetAlterPublicationDDLCommandsForTable(Oid relationId, bool isAdd) List *publicationIds = GetRelationPublications(relationId); Oid publicationId = InvalidOid; - foreach_oid(publicationId, publicationIds) + foreach_declared_oid(publicationId, publicationIds) { char *command = GetAlterPublicationTableDDLCommand(publicationId, relationId, isAdd); diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 7f5f697f2b1..41cba74d063 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -74,7 +74,9 @@ static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple, TupleDesc DbRoleSettingDescription); static char * GetDatabaseNameFromDbRoleSetting(HeapTuple tuple, TupleDesc DbRoleSettingDescription); +#if PG_VERSION_NUM < PG_VERSION_17 static Node * makeStringConst(char *str, int location); +#endif static Node * makeIntConst(int val, int 
location); static Node * makeFloatConst(char *str, int location); static const char * WrapQueryInAlterRoleIfExistsCall(const char *query, RoleSpec *role); @@ -163,7 +165,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString) AlterRoleStmt *stmt = castNode(AlterRoleStmt, node); DefElem *option = NULL; - foreach_ptr(option, stmt->options) + foreach_declared_ptr(option, stmt->options) { if (strcasecmp(option->defname, "password") == 0) { @@ -564,7 +566,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) { List *grantRoleStmts = GenerateGrantRoleStmtsOfRole(roleOid); Node *stmt = NULL; - foreach_ptr(stmt, grantRoleStmts) + foreach_declared_ptr(stmt, grantRoleStmts) { completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); } @@ -578,7 +580,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) */ List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename); stmt = NULL; - foreach_ptr(stmt, secLabelOnRoleStmts) + foreach_declared_ptr(stmt, secLabelOnRoleStmts) { completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); } @@ -787,7 +789,7 @@ MakeSetStatementArguments(char *configurationName, char *configurationValue) } char *configuration = NULL; - foreach_ptr(configuration, configurationList) + foreach_declared_ptr(configuration, configurationList) { Node *arg = makeStringConst(configuration, -1); args = lappend(args, arg); @@ -823,7 +825,7 @@ GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options) List *stmts = NIL; DefElem *option = NULL; - foreach_ptr(option, options) + foreach_declared_ptr(option, options) { if (strcmp(option->defname, "adminmembers") != 0 && strcmp(option->defname, "rolemembers") != 0 && @@ -1047,7 +1049,7 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString, /* deparse all grant statements and add them to the commands list */ Node *stmt = NULL; - foreach_ptr(stmt, grantRoleStmts) + foreach_declared_ptr(stmt, grantRoleStmts) { commands = lappend(commands, DeparseTreeNode(stmt)); } @@ -1058,6 +1060,8 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString, } +#if PG_VERSION_NUM < PG_VERSION_17 + /* * makeStringConst creates a Const Node that stores a given string * @@ -1081,6 +1085,9 @@ makeStringConst(char *str, int location) } +#endif + + /* * makeIntConst creates a Const Node that stores a given integer * @@ -1174,7 +1181,7 @@ void UnmarkRolesDistributed(List *roles) { Node *roleNode = NULL; - foreach_ptr(roleNode, roles) + foreach_declared_ptr(roleNode, roles) { RoleSpec *role = castNode(RoleSpec, roleNode); ObjectAddress roleAddress = { 0 }; @@ -1204,7 +1211,7 @@ FilterDistributedRoles(List *roles) { List *distributedRoles = NIL; Node *roleNode = NULL; - foreach_ptr(roleNode, roles) + foreach_declared_ptr(roleNode, roles) { RoleSpec *role = castNode(RoleSpec, roleNode); Oid roleOid = get_rolespec_oid(role, true); @@ -1282,7 +1289,7 @@ PostprocessGrantRoleStmt(Node *node, const char *queryString) GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); RoleSpec *role = NULL; - foreach_ptr(role, stmt->grantee_roles) + foreach_declared_ptr(role, stmt->grantee_roles) { Oid roleOid = get_rolespec_oid(role, false); ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress)); diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index 7f79897faed..b079fe3f674 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -162,7 +162,7 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString, 
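/*
 * Editor's sketch (not part of the patch): the bulk of the hunks above and
 * below mechanically rename Citus' list-iteration helpers foreach_ptr(),
 * foreach_oid() and friends to foreach_declared_ptr()/foreach_declared_oid(),
 * most likely because PostgreSQL 17's pg_list.h introduces its own foreach_ptr
 * style macros that declare the loop variable themselves. The "declared"
 * variants keep the old calling convention, in which the caller declares the
 * variable first. A minimal usage illustration (PrintSchemaNames is a
 * hypothetical example function, not part of Citus):
 */
static void
PrintSchemaNames(List *schemaIdList)
{
	Oid schemaId = InvalidOid;

	/* the macro assigns each list member to the caller-declared variable */
	foreach_declared_oid(schemaId, schemaIdList)
	{
		elog(DEBUG1, "schema: %s", get_namespace_name(schemaId));
	}
}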
EnsureSequentialMode(OBJECT_SCHEMA); String *schemaVal = NULL; - foreach_ptr(schemaVal, distributedSchemas) + foreach_declared_ptr(schemaVal, distributedSchemas) { if (SchemaHasDistributedTableWithFKey(strVal(schemaVal))) { @@ -322,7 +322,7 @@ FilterDistributedSchemas(List *schemas) List *distributedSchemas = NIL; String *schemaValue = NULL; - foreach_ptr(schemaValue, schemas) + foreach_declared_ptr(schemaValue, schemas) { const char *schemaName = strVal(schemaValue); Oid schemaOid = get_namespace_oid(schemaName, true); @@ -443,7 +443,7 @@ GetGrantCommandsFromCreateSchemaStmt(Node *node) CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node); Node *element = NULL; - foreach_ptr(element, stmt->schemaElts) + foreach_declared_ptr(element, stmt->schemaElts) { if (!IsA(element, GrantStmt)) { @@ -480,7 +480,7 @@ static bool CreateSchemaStmtCreatesTable(CreateSchemaStmt *stmt) { Node *element = NULL; - foreach_ptr(element, stmt->schemaElts) + foreach_declared_ptr(element, stmt->schemaElts) { /* * CREATE TABLE AS and CREATE FOREIGN TABLE commands cannot be diff --git a/src/backend/distributed/commands/schema_based_sharding.c b/src/backend/distributed/commands/schema_based_sharding.c index 7cde9698295..6635d6817a5 100644 --- a/src/backend/distributed/commands/schema_based_sharding.c +++ b/src/backend/distributed/commands/schema_based_sharding.c @@ -174,7 +174,7 @@ EnsureTableKindSupportedForTenantSchema(Oid relationId) List *partitionList = PartitionList(relationId); Oid partitionRelationId = InvalidOid; - foreach_oid(partitionRelationId, partitionList) + foreach_declared_oid(partitionRelationId, partitionList) { ErrorIfIllegalPartitioningInTenantSchema(relationId, partitionRelationId); } @@ -199,7 +199,7 @@ EnsureFKeysForTenantTable(Oid relationId) int fKeyReferencingFlags = INCLUDE_REFERENCING_CONSTRAINTS | INCLUDE_ALL_TABLE_TYPES; List *referencingForeignKeys = GetForeignKeyOids(relationId, fKeyReferencingFlags); Oid foreignKeyId = InvalidOid; - foreach_oid(foreignKeyId, referencingForeignKeys) + foreach_declared_oid(foreignKeyId, referencingForeignKeys) { Oid referencingTableId = GetReferencingTableId(foreignKeyId); Oid referencedTableId = GetReferencedTableId(foreignKeyId); @@ -232,7 +232,7 @@ EnsureFKeysForTenantTable(Oid relationId) int fKeyReferencedFlags = INCLUDE_REFERENCED_CONSTRAINTS | INCLUDE_ALL_TABLE_TYPES; List *referencedForeignKeys = GetForeignKeyOids(relationId, fKeyReferencedFlags); - foreach_oid(foreignKeyId, referencedForeignKeys) + foreach_declared_oid(foreignKeyId, referencedForeignKeys) { Oid referencingTableId = GetReferencingTableId(foreignKeyId); Oid referencedTableId = GetReferencedTableId(foreignKeyId); @@ -429,7 +429,7 @@ EnsureSchemaCanBeDistributed(Oid schemaId, List *schemaTableIdList) } Oid relationId = InvalidOid; - foreach_oid(relationId, schemaTableIdList) + foreach_declared_oid(relationId, schemaTableIdList) { EnsureTenantTable(relationId, "citus_schema_distribute"); } @@ -637,7 +637,7 @@ citus_schema_distribute(PG_FUNCTION_ARGS) List *tableIdListInSchema = SchemaGetNonShardTableIdList(schemaId); List *tableIdListToConvert = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, tableIdListInSchema) + foreach_declared_oid(relationId, tableIdListInSchema) { /* prevent concurrent drop of the relation */ LockRelationOid(relationId, AccessShareLock); @@ -675,7 +675,7 @@ citus_schema_distribute(PG_FUNCTION_ARGS) * tables. 
*/ List *originalForeignKeyRecreationCommands = NIL; - foreach_oid(relationId, tableIdListToConvert) + foreach_declared_oid(relationId, tableIdListToConvert) { List *fkeyCommandsForRelation = GetFKeyCreationCommandsRelationInvolvedWithTableType(relationId, @@ -741,7 +741,7 @@ citus_schema_undistribute(PG_FUNCTION_ARGS) List *tableIdListInSchema = SchemaGetNonShardTableIdList(schemaId); List *tableIdListToConvert = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, tableIdListInSchema) + foreach_declared_oid(relationId, tableIdListInSchema) { /* prevent concurrent drop of the relation */ LockRelationOid(relationId, AccessShareLock); @@ -883,7 +883,7 @@ TenantSchemaPickAnchorShardId(Oid schemaId) } Oid relationId = InvalidOid; - foreach_oid(relationId, tablesInSchema) + foreach_declared_oid(relationId, tablesInSchema) { /* * Make sure the relation isn't dropped for the remainder of diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index cfb55faf740..4af4c4853a2 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -123,7 +123,7 @@ static bool OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId) { DefElem *defElem = NULL; - foreach_ptr(defElem, optionList) + foreach_declared_ptr(defElem, optionList) { if (strcmp(defElem->defname, "owned_by") == 0) { @@ -202,7 +202,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList, } Oid ownedSequenceId = InvalidOid; - foreach_oid(ownedSequenceId, columnOwnedSequences) + foreach_declared_oid(ownedSequenceId, columnOwnedSequences) { /* * A column might have multiple sequences one via OWNED BY one another @@ -288,7 +288,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString, */ List *deletingSequencesList = stmt->objects; List *objectNameList = NULL; - foreach_ptr(objectNameList, deletingSequencesList) + foreach_declared_ptr(objectNameList, deletingSequencesList) { RangeVar *seq = makeRangeVarFromNameList(objectNameList); @@ -322,7 +322,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString, /* remove the entries for the distributed objects on dropping */ ObjectAddress *address = NULL; - foreach_ptr(address, distributedSequenceAddresses) + foreach_declared_ptr(address, distributedSequenceAddresses) { UnmarkObjectDistributed(address); } @@ -356,7 +356,7 @@ SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess) List *droppingSequencesList = dropSeqStmt->objects; List *objectNameList = NULL; - foreach_ptr(objectNameList, droppingSequencesList) + foreach_declared_ptr(objectNameList, droppingSequencesList) { RangeVar *seq = makeRangeVarFromNameList(objectNameList); @@ -476,7 +476,7 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString, { List *options = stmt->options; DefElem *defel = NULL; - foreach_ptr(defel, options) + foreach_declared_ptr(defel, options) { if (strcmp(defel->defname, "as") == 0) { @@ -511,7 +511,7 @@ SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress, char depTyp Oid relationId; List *relations = GetDependentRelationsWithSequence(sequenceAddress->objectId, depType); - foreach_oid(relationId, relations) + foreach_declared_oid(relationId, relations) { if (IsCitusTable(relationId)) { @@ -930,7 +930,7 @@ PostprocessGrantOnSequenceStmt(Node *node, const char *queryString) EnsureCoordinator(); RangeVar *sequence = NULL; - foreach_ptr(sequence, distributedSequences) + foreach_declared_ptr(sequence, distributedSequences) 
{ ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); Oid sequenceOid = RangeVarGetRelid(sequence, NoLock, false); @@ -1014,7 +1014,7 @@ FilterDistributedSequences(GrantStmt *stmt) /* iterate over all namespace names provided to get their oid's */ List *namespaceOidList = NIL; String *namespaceValue = NULL; - foreach_ptr(namespaceValue, stmt->objects) + foreach_declared_ptr(namespaceValue, stmt->objects) { char *nspname = strVal(namespaceValue); bool missing_ok = false; @@ -1028,7 +1028,7 @@ FilterDistributedSequences(GrantStmt *stmt) */ List *distributedSequenceList = DistributedSequenceList(); ObjectAddress *sequenceAddress = NULL; - foreach_ptr(sequenceAddress, distributedSequenceList) + foreach_declared_ptr(sequenceAddress, distributedSequenceList) { Oid namespaceOid = get_rel_namespace(sequenceAddress->objectId); @@ -1052,7 +1052,7 @@ FilterDistributedSequences(GrantStmt *stmt) { bool missing_ok = false; RangeVar *sequenceRangeVar = NULL; - foreach_ptr(sequenceRangeVar, stmt->objects) + foreach_declared_ptr(sequenceRangeVar, stmt->objects) { Oid sequenceOid = RangeVarGetRelid(sequenceRangeVar, NoLock, missing_ok); ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index 5fac767fd70..b43f6335ef4 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -184,7 +184,7 @@ PreprocessDropStatisticsStmt(Node *node, const char *queryString, List *ddlJobs = NIL; List *processedStatsOids = NIL; List *objectNameList = NULL; - foreach_ptr(objectNameList, dropStatisticsStmt->objects) + foreach_declared_ptr(objectNameList, dropStatisticsStmt->objects) { Oid statsOid = get_statistics_object_oid(objectNameList, dropStatisticsStmt->missing_ok); @@ -234,7 +234,7 @@ DropStatisticsObjectAddress(Node *node, bool missing_ok, bool isPostprocess) List *objectAddresses = NIL; List *objectNameList = NULL; - foreach_ptr(objectNameList, dropStatisticsStmt->objects) + foreach_declared_ptr(objectNameList, dropStatisticsStmt->objects) { Oid statsOid = get_statistics_object_oid(objectNameList, dropStatisticsStmt->missing_ok); @@ -535,7 +535,7 @@ GetExplicitStatisticsCommandList(Oid relationId) int saveNestLevel = PushEmptySearchPath(); Oid statisticsId = InvalidOid; - foreach_oid(statisticsId, statisticsIdList) + foreach_declared_oid(statisticsId, statisticsIdList) { /* we need create commands for already created stats before distribution */ Datum commandText = DirectFunctionCall1(pg_get_statisticsobjdef, @@ -606,7 +606,7 @@ GetExplicitStatisticsSchemaIdList(Oid relationId) RelationClose(relation); Oid statsId = InvalidOid; - foreach_oid(statsId, statsIdList) + foreach_declared_oid(statsId, statsIdList) { HeapTuple heapTuple = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statsId)); if (!HeapTupleIsValid(heapTuple)) @@ -651,14 +651,15 @@ GetAlterIndexStatisticsCommands(Oid indexOid) } Form_pg_attribute targetAttr = (Form_pg_attribute) GETSTRUCT(attTuple); - if (targetAttr->attstattarget != DEFAULT_STATISTICS_TARGET) + int32 targetAttstattarget = getAttstattarget_compat(attTuple); + if (targetAttstattarget != DEFAULT_STATISTICS_TARGET) { char *indexNameWithSchema = generate_qualified_relation_name(indexOid); char *command = GenerateAlterIndexColumnSetStatsCommand(indexNameWithSchema, targetAttr->attnum, - targetAttr->attstattarget); + targetAttstattarget); alterIndexStatisticsCommandList = lappend(alterIndexStatisticsCommandList, 
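/*
 * Editor's sketch (not part of the patch): the statistics.c hunk above stops
 * reading attstattarget straight out of Form_pg_attribute and goes through
 * getAttstattarget_compat() instead, presumably because PostgreSQL 17 turns
 * pg_attribute.attstattarget (and pg_statistic_ext.stxstattarget) into
 * nullable columns that are no longer part of the fixed-size catalog struct.
 * An illustrative accessor in that spirit (not Citus' actual macro; the NULL
 * case is mapped here to -1, the pre-17 "use the default target" sentinel):
 */
static inline int32
GetAttStatTargetFromTuple(HeapTuple attTuple)
{
#if PG_VERSION_NUM >= PG_VERSION_17
	bool isNull = false;
	Datum datum = SysCacheGetAttr(ATTNUM, attTuple,
								  Anum_pg_attribute_attstattarget, &isNull);

	return isNull ? -1 : (int32) DatumGetInt16(datum);
#else
	Form_pg_attribute attForm = (Form_pg_attribute) GETSTRUCT(attTuple);

	return attForm->attstattarget;
#endif
}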
@@ -773,9 +774,10 @@ CreateAlterCommandIfTargetNotDefault(Oid statsOid) } Form_pg_statistic_ext statisticsForm = (Form_pg_statistic_ext) GETSTRUCT(tup); + int16 currentStxstattarget = getStxstattarget_compat(tup); ReleaseSysCache(tup); - if (statisticsForm->stxstattarget == -1) + if (currentStxstattarget == -1) { return NULL; } @@ -785,7 +787,8 @@ CreateAlterCommandIfTargetNotDefault(Oid statsOid) char *schemaName = get_namespace_name(statisticsForm->stxnamespace); char *statName = NameStr(statisticsForm->stxname); - alterStatsStmt->stxstattarget = statisticsForm->stxstattarget; + alterStatsStmt->stxstattarget = getAlterStatsStxstattarget_compat( + currentStxstattarget); alterStatsStmt->defnames = list_make2(makeString(schemaName), makeString(statName)); return DeparseAlterStatisticsStmt((Node *) alterStatsStmt); diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 30b028b79b1..e65f57961ba 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -154,7 +154,7 @@ PreprocessDropTableStmt(Node *node, const char *queryString, Assert(dropTableStatement->removeType == OBJECT_TABLE); List *tableNameList = NULL; - foreach_ptr(tableNameList, dropTableStatement->objects) + foreach_declared_ptr(tableNameList, dropTableStatement->objects) { RangeVar *tableRangeVar = makeRangeVarFromNameList(tableNameList); bool missingOK = true; @@ -202,7 +202,7 @@ PreprocessDropTableStmt(Node *node, const char *queryString, SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); Oid partitionRelationId = InvalidOid; - foreach_oid(partitionRelationId, partitionList) + foreach_declared_oid(partitionRelationId, partitionList) { char *detachPartitionCommand = GenerateDetachPartitionCommand(partitionRelationId); @@ -263,7 +263,7 @@ PostprocessCreateTableStmt(CreateStmt *createStatement, const char *queryString) } RangeVar *parentRelation = NULL; - foreach_ptr(parentRelation, createStatement->inhRelations) + foreach_declared_ptr(parentRelation, createStatement->inhRelations) { Oid parentRelationId = RangeVarGetRelid(parentRelation, NoLock, missingOk); @@ -480,7 +480,7 @@ PreprocessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement, { List *commandList = alterTableStatement->cmds; AlterTableCmd *alterTableCommand = NULL; - foreach_ptr(alterTableCommand, commandList) + foreach_declared_ptr(alterTableCommand, commandList) { if (alterTableCommand->subtype == AT_AttachPartition) { @@ -792,7 +792,7 @@ ChooseForeignKeyConstraintNameAddition(List *columnNames) String *columnNameString = NULL; - foreach_ptr(columnNameString, columnNames) + foreach_declared_ptr(columnNameString, columnNames) { const char *name = strVal(columnNameString); @@ -1314,7 +1314,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, AlterTableCmd *newCmd = makeNode(AlterTableCmd); AlterTableCmd *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { AlterTableType alterTableType = command->subtype; @@ -1418,7 +1418,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, List *columnConstraints = columnDefinition->constraints; Constraint *constraint = NULL; - foreach_ptr(constraint, columnConstraints) + foreach_declared_ptr(constraint, columnConstraints) { if (constraint->contype == CONSTR_FOREIGN) { @@ -1442,7 +1442,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, deparseAT = true; constraint = NULL; - foreach_ptr(constraint, 
columnConstraints) + foreach_declared_ptr(constraint, columnConstraints) { if (ConstrTypeCitusCanDefaultName(constraint->contype)) { @@ -1467,7 +1467,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, */ constraint = NULL; int constraintIdx = 0; - foreach_ptr(constraint, columnConstraints) + foreach_declared_ptr(constraint, columnConstraints) { if (constraint->contype == CONSTR_DEFAULT) { @@ -1696,7 +1696,7 @@ DeparserSupportsAlterTableAddColumn(AlterTableStmt *alterTableStatement, { ColumnDef *columnDefinition = (ColumnDef *) addColumnSubCommand->def; Constraint *constraint = NULL; - foreach_ptr(constraint, columnDefinition->constraints) + foreach_declared_ptr(constraint, columnDefinition->constraints) { if (constraint->contype == CONSTR_CHECK) { @@ -1792,7 +1792,7 @@ static bool RelationIdListContainsCitusTableType(List *relationIdList, CitusTableType citusTableType) { Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { if (IsCitusTableType(relationId, citusTableType)) { @@ -1812,7 +1812,7 @@ static bool RelationIdListContainsPostgresTable(List *relationIdList) { Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { if (OidIsValid(relationId) && !IsCitusTable(relationId)) { @@ -1851,7 +1851,7 @@ ConvertPostgresLocalTablesToCitusLocalTables(AlterTableStmt *alterTableStatement * change in below loop due to CreateCitusLocalTable. */ RangeVar *relationRangeVar; - foreach_ptr(relationRangeVar, relationRangeVarList) + foreach_declared_ptr(relationRangeVar, relationRangeVarList) { List *commandList = alterTableStatement->cmds; LOCKMODE lockMode = AlterTableGetLockLevel(commandList); @@ -1979,7 +1979,7 @@ RangeVarListHasLocalRelationConvertedByUser(List *relationRangeVarList, AlterTableStmt *alterTableStatement) { RangeVar *relationRangeVar; - foreach_ptr(relationRangeVar, relationRangeVarList) + foreach_declared_ptr(relationRangeVar, relationRangeVarList) { /* * Here we iterate the relation list, and if at least one of the relations @@ -2076,7 +2076,7 @@ GetAlterTableAddFKeyConstraintList(AlterTableStmt *alterTableStatement) List *commandList = alterTableStatement->cmds; AlterTableCmd *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { List *commandForeignKeyConstraintList = GetAlterTableCommandFKeyConstraintList(command); @@ -2116,7 +2116,7 @@ GetAlterTableCommandFKeyConstraintList(AlterTableCmd *command) List *columnConstraints = columnDefinition->constraints; Constraint *constraint = NULL; - foreach_ptr(constraint, columnConstraints) + foreach_declared_ptr(constraint, columnConstraints) { if (constraint->contype == CONSTR_FOREIGN) { @@ -2139,7 +2139,7 @@ GetRangeVarListFromFKeyConstraintList(List *fKeyConstraintList) List *rightRelationRangeVarList = NIL; Constraint *fKeyConstraint = NULL; - foreach_ptr(fKeyConstraint, fKeyConstraintList) + foreach_declared_ptr(fKeyConstraint, fKeyConstraintList) { RangeVar *rightRelationRangeVar = fKeyConstraint->pktable; rightRelationRangeVarList = lappend(rightRelationRangeVarList, @@ -2160,7 +2160,7 @@ GetRelationIdListFromRangeVarList(List *rangeVarList, LOCKMODE lockMode, bool mi List *relationIdList = NIL; RangeVar *rangeVar = NULL; - foreach_ptr(rangeVar, rangeVarList) + foreach_declared_ptr(rangeVar, rangeVarList) { Oid rightRelationId = RangeVarGetRelid(rangeVar, lockMode, missingOk); relationIdList = lappend_oid(relationIdList, 
rightRelationId); @@ -2234,7 +2234,7 @@ AlterTableDropsForeignKey(AlterTableStmt *alterTableStatement) Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode); AlterTableCmd *command = NULL; - foreach_ptr(command, alterTableStatement->cmds) + foreach_declared_ptr(command, alterTableStatement->cmds) { AlterTableType alterTableType = command->subtype; @@ -2296,7 +2296,7 @@ AnyForeignKeyDependsOnIndex(Oid indexId) GetPgDependTuplesForDependingObjects(dependentObjectClassId, dependentObjectId); HeapTuple dependencyTuple = NULL; - foreach_ptr(dependencyTuple, dependencyTupleList) + foreach_declared_ptr(dependencyTuple, dependencyTupleList) { Form_pg_depend dependencyForm = (Form_pg_depend) GETSTRUCT(dependencyTuple); Oid dependingClassId = dependencyForm->classid; @@ -2484,7 +2484,7 @@ SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStatement, * shards anyway. */ AlterTableCmd *command = NULL; - foreach_ptr(command, alterTableStatement->cmds) + foreach_declared_ptr(command, alterTableStatement->cmds) { AlterTableType alterTableType = command->subtype; @@ -2565,7 +2565,7 @@ ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement) /* then check if any of subcommands drop partition column.*/ List *commandList = alterTableStatement->cmds; AlterTableCmd *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { AlterTableType alterTableType = command->subtype; if (alterTableType == AT_DropColumn) @@ -2634,7 +2634,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) List *commandList = alterTableStatement->cmds; AlterTableCmd *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { AlterTableType alterTableType = command->subtype; @@ -2670,7 +2670,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) } Constraint *constraint = NULL; - foreach_ptr(constraint, columnConstraints) + foreach_declared_ptr(constraint, columnConstraints) { if (constraint->conname == NULL && (constraint->contype == CONSTR_PRIMARY || @@ -2690,7 +2690,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) * that sequence is supported */ constraint = NULL; - foreach_ptr(constraint, columnConstraints) + foreach_declared_ptr(constraint, columnConstraints) { if (constraint->contype == CONSTR_DEFAULT) { @@ -2802,7 +2802,7 @@ FixAlterTableStmtIndexNames(AlterTableStmt *alterTableStatement) List *commandList = alterTableStatement->cmds; AlterTableCmd *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { AlterTableType alterTableType = command->subtype; @@ -3165,7 +3165,7 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod, List *indexOidList = RelationGetIndexList(relation); Oid indexOid = InvalidOid; - foreach_oid(indexOid, indexOidList) + foreach_declared_oid(indexOid, indexOidList) { Relation indexDesc = index_open(indexOid, RowExclusiveLock); bool hasDistributionColumn = false; @@ -3310,7 +3310,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) /* error out if any of the subcommands are unsupported */ AlterTableCmd *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { AlterTableType alterTableType = command->subtype; @@ -3385,7 +3385,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) Constraint *columnConstraint = NULL; - foreach_ptr(columnConstraint, column->constraints) + 
foreach_declared_ptr(columnConstraint, column->constraints) { if (columnConstraint->contype == CONSTR_IDENTITY) { @@ -3417,7 +3417,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) List *columnConstraints = column->constraints; Constraint *constraint = NULL; - foreach_ptr(constraint, columnConstraints) + foreach_declared_ptr(constraint, columnConstraints) { if (constraint->contype == CONSTR_DEFAULT) { @@ -3770,7 +3770,7 @@ SetupExecutionModeForAlterTable(Oid relationId, AlterTableCmd *command) List *columnConstraints = columnDefinition->constraints; Constraint *constraint = NULL; - foreach_ptr(constraint, columnConstraints) + foreach_declared_ptr(constraint, columnConstraints) { if (constraint->contype == CONSTR_FOREIGN) { @@ -3970,10 +3970,10 @@ SetInterShardDDLTaskPlacementList(Task *task, ShardInterval *leftShardInterval, List *intersectedPlacementList = NIL; ShardPlacement *leftShardPlacement = NULL; - foreach_ptr(leftShardPlacement, leftShardPlacementList) + foreach_declared_ptr(leftShardPlacement, leftShardPlacementList) { ShardPlacement *rightShardPlacement = NULL; - foreach_ptr(rightShardPlacement, rightShardPlacementList) + foreach_declared_ptr(rightShardPlacement, rightShardPlacementList) { if (leftShardPlacement->nodeId == rightShardPlacement->nodeId) { diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c index 74cb6259ffb..01ee72d3154 100644 --- a/src/backend/distributed/commands/trigger.c +++ b/src/backend/distributed/commands/trigger.c @@ -81,7 +81,7 @@ GetExplicitTriggerCommandList(Oid relationId) List *triggerIdList = GetExplicitTriggerIdList(relationId); Oid triggerId = InvalidOid; - foreach_oid(triggerId, triggerIdList) + foreach_declared_oid(triggerId, triggerIdList) { bool prettyOutput = false; Datum commandText = DirectFunctionCall2(pg_get_triggerdef_ext, @@ -742,7 +742,7 @@ ErrorIfRelationHasUnsupportedTrigger(Oid relationId) List *relationTriggerList = GetExplicitTriggerIdList(relationId); Oid triggerId = InvalidOid; - foreach_oid(triggerId, relationTriggerList) + foreach_declared_oid(triggerId, relationTriggerList) { ObjectAddress triggerObjectAddress = InvalidObjectAddress; ObjectAddressSet(triggerObjectAddress, TriggerRelationId, triggerId); diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c index 0eb43f529cd..46cf5e602a1 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -135,7 +135,7 @@ TruncateTaskList(Oid relationId) LockShardListMetadata(shardIntervalList, ShareLock); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; char *shardRelationName = pstrdup(relationName); @@ -264,7 +264,7 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement) { List *relationList = truncateStatement->relations; RangeVar *rangeVar = NULL; - foreach_ptr(rangeVar, relationList) + foreach_declared_ptr(rangeVar, relationList) { Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false); @@ -294,7 +294,7 @@ static void EnsurePartitionTableNotReplicatedForTruncate(TruncateStmt *truncateStatement) { RangeVar *rangeVar = NULL; - foreach_ptr(rangeVar, truncateStatement->relations) + foreach_declared_ptr(rangeVar, truncateStatement->relations) { Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false); @@ -322,7 +322,7 @@ 
ExecuteTruncateStmtSequentialIfNecessary(TruncateStmt *command) bool failOK = false; RangeVar *rangeVar = NULL; - foreach_ptr(rangeVar, relationList) + foreach_declared_ptr(rangeVar, relationList) { Oid relationId = RangeVarGetRelid(rangeVar, NoLock, failOK); diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 9426e13c0ea..4d297297b30 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -454,7 +454,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt, bool analyze = false; DefElem *option = NULL; - foreach_ptr(option, explainStmt->options) + foreach_declared_ptr(option, explainStmt->options) { if (strcmp(option->defname, "analyze") == 0) { @@ -695,7 +695,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt, { AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree; AlterTableCmd *command = NULL; - foreach_ptr(command, alterTableStmt->cmds) + foreach_declared_ptr(command, alterTableStmt->cmds) { AlterTableType alterTableType = command->subtype; @@ -879,7 +879,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt, } DDLJob *ddlJob = NULL; - foreach_ptr(ddlJob, ddlJobs) + foreach_declared_ptr(ddlJob, ddlJobs) { ExecuteDistributedDDLJob(ddlJob); } @@ -939,7 +939,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt, { List *addresses = GetObjectAddressListFromParseTree(parsetree, false, true); ObjectAddress *address = NULL; - foreach_ptr(address, addresses) + foreach_declared_ptr(address, addresses) { MarkObjectDistributed(address); TrackPropagatedObject(address); @@ -962,7 +962,7 @@ UndistributeDisconnectedCitusLocalTables(void) citusLocalTableIdList = SortList(citusLocalTableIdList, CompareOids); Oid citusLocalTableId = InvalidOid; - foreach_oid(citusLocalTableId, citusLocalTableIdList) + foreach_declared_oid(citusLocalTableId, citusLocalTableIdList) { /* acquire ShareRowExclusiveLock to prevent concurrent foreign key creation */ LOCKMODE lockMode = ShareRowExclusiveLock; @@ -1349,7 +1349,7 @@ CurrentSearchPath(void) bool schemaAdded = false; Oid searchPathOid = InvalidOid; - foreach_oid(searchPathOid, searchPathList) + foreach_declared_oid(searchPathOid, searchPathList) { char *schemaName = get_namespace_name(searchPathOid); @@ -1483,7 +1483,7 @@ DDLTaskList(Oid relationId, const char *commandString) LockShardListMetadata(shardIntervalList, ShareLock); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; StringInfo applyCommand = makeStringInfo(); @@ -1525,10 +1525,10 @@ NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands, { List *ddlJobs = NodeDDLTaskList(targets, commands); DDLJob *ddlJob = NULL; - foreach_ptr(ddlJob, ddlJobs) + foreach_declared_ptr(ddlJob, ddlJobs) { Task *task = NULL; - foreach_ptr(task, ddlJob->taskList) + foreach_declared_ptr(task, ddlJob->taskList) { task->cannotBeExecutedInTransaction = true; } @@ -1564,7 +1564,7 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands) SetTaskQueryStringList(task, commands); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodes) + foreach_declared_ptr(workerNode, workerNodes) { ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement); targetPlacement->nodeName = workerNode->workerName; diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index 5988a447ddf..3bdabe4671c 100644 --- 
a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -135,7 +135,7 @@ VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams) List *relationIdList = NIL; RangeVar *vacuumRelation = NULL; - foreach_ptr(vacuumRelation, vacuumRelationList) + foreach_declared_ptr(vacuumRelation, vacuumRelationList) { /* * If skip_locked option is enabled, we are skipping that relation @@ -164,7 +164,7 @@ static bool IsDistributedVacuumStmt(List *vacuumRelationIdList) { Oid relationId = InvalidOid; - foreach_oid(relationId, vacuumRelationIdList) + foreach_declared_oid(relationId, vacuumRelationIdList) { if (OidIsValid(relationId) && IsCitusTable(relationId)) { @@ -187,7 +187,7 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList, int relationIndex = 0; Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { if (IsCitusTable(relationId)) { @@ -252,7 +252,7 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum LockShardListMetadata(shardIntervalList, ShareLock); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; char *shardRelationName = pstrdup(relationName); @@ -473,7 +473,7 @@ DeparseVacuumColumnNames(List *columnNameList) appendStringInfoString(columnNames, " ("); String *columnName = NULL; - foreach_ptr(columnName, columnNameList) + foreach_declared_ptr(columnName, columnNameList) { appendStringInfo(columnNames, "%s,", strVal(columnName)); } @@ -508,7 +508,7 @@ ExtractVacuumTargetRels(VacuumStmt *vacuumStmt) List *vacuumList = NIL; VacuumRelation *vacuumRelation = NULL; - foreach_ptr(vacuumRelation, vacuumStmt->rels) + foreach_declared_ptr(vacuumRelation, vacuumStmt->rels) { vacuumList = lappend(vacuumList, vacuumRelation->relation); } @@ -552,7 +552,7 @@ VacuumStmtParams(VacuumStmt *vacstmt) /* Parse options list */ DefElem *opt = NULL; - foreach_ptr(opt, vacstmt->options) + foreach_declared_ptr(opt, vacstmt->options) { /* Parse common options for VACUUM and ANALYZE */ if (strcmp(opt->defname, "verbose") == 0) @@ -725,7 +725,7 @@ ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumPa int32 localNodeGroupId = GetLocalGroupId(); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodes) + foreach_declared_ptr(workerNode, workerNodes) { if (workerNode->groupId != localNodeGroupId) { diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c index 9689b92679f..0ffd00ec406 100644 --- a/src/backend/distributed/commands/view.c +++ b/src/backend/distributed/commands/view.c @@ -69,7 +69,7 @@ ViewHasDistributedRelationDependency(ObjectAddress *viewObjectAddress) List *dependencies = GetAllDependenciesForObject(viewObjectAddress); ObjectAddress *dependency = NULL; - foreach_ptr(dependency, dependencies) + foreach_declared_ptr(dependency, dependencies) { if (dependency->classId == RelationRelationId && IsAnyObjectDistributed( list_make1(dependency))) @@ -304,7 +304,7 @@ DropViewStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess) List *objectAddresses = NIL; List *possiblyQualifiedViewName = NULL; - foreach_ptr(possiblyQualifiedViewName, dropStmt->objects) + foreach_declared_ptr(possiblyQualifiedViewName, dropStmt->objects) { RangeVar *viewRangeVar = makeRangeVarFromNameList(possiblyQualifiedViewName); Oid viewOid = 
RangeVarGetRelid(viewRangeVar, AccessShareLock, @@ -332,7 +332,7 @@ FilterNameListForDistributedViews(List *viewNamesList, bool missing_ok) List *distributedViewNames = NIL; List *possiblyQualifiedViewName = NULL; - foreach_ptr(possiblyQualifiedViewName, viewNamesList) + foreach_declared_ptr(possiblyQualifiedViewName, viewNamesList) { char *viewName = NULL; char *schemaName = NULL; diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index f8e4816ed7d..4787d8f2fd3 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -866,7 +866,8 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount) *waitCount = 0; } - WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext, eventSetSize); + WaitEventSet *waitEventSet = CreateWaitEventSet(WaitEventSetTracker_compat, + eventSetSize); EnsureReleaseResource((MemoryContextCallbackFunction) (&FreeWaitEventSet), waitEventSet); @@ -879,7 +880,7 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount) numEventsAdded += 2; MultiConnectionPollState *connectionState = NULL; - foreach_ptr(connectionState, connections) + foreach_declared_ptr(connectionState, connections) { if (numEventsAdded >= eventSetSize) { @@ -961,7 +962,7 @@ FinishConnectionListEstablishment(List *multiConnectionList) int waitCount = 0; MultiConnection *connection = NULL; - foreach_ptr(connection, multiConnectionList) + foreach_declared_ptr(connection, multiConnectionList) { MultiConnectionPollState *connectionState = palloc0(sizeof(MultiConnectionPollState)); @@ -1160,7 +1161,7 @@ static void CloseNotReadyMultiConnectionStates(List *connectionStates) { MultiConnectionPollState *connectionState = NULL; - foreach_ptr(connectionState, connectionStates) + foreach_declared_ptr(connectionState, connectionStates) { MultiConnection *connection = connectionState->connection; diff --git a/src/backend/distributed/connection/locally_reserved_shared_connections.c b/src/backend/distributed/connection/locally_reserved_shared_connections.c index a64930b3296..4dfcc0a9812 100644 --- a/src/backend/distributed/connection/locally_reserved_shared_connections.c +++ b/src/backend/distributed/connection/locally_reserved_shared_connections.c @@ -360,7 +360,7 @@ EnsureConnectionPossibilityForNodeList(List *nodeList) nodeList = SortList(nodeList, CompareWorkerNodes); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, nodeList) + foreach_declared_ptr(workerNode, nodeList) { bool waitForConnection = true; EnsureConnectionPossibilityForNode(workerNode, waitForConnection); diff --git a/src/backend/distributed/connection/placement_connection.c b/src/backend/distributed/connection/placement_connection.c index 10c99bd8074..841deba08db 100644 --- a/src/backend/distributed/connection/placement_connection.c +++ b/src/backend/distributed/connection/placement_connection.c @@ -370,7 +370,7 @@ AssignPlacementListToConnection(List *placementAccessList, MultiConnection *conn const char *userName = connection->user; ShardPlacementAccess *placementAccess = NULL; - foreach_ptr(placementAccess, placementAccessList) + foreach_declared_ptr(placementAccess, placementAccessList) { ShardPlacement *placement = placementAccess->placement; ShardPlacementAccessType accessType = placementAccess->accessType; @@ -533,7 +533,7 @@ FindPlacementListConnection(int flags, List *placementAccessList, const char *us * suitable connection 
found for a placement in the placementAccessList. */ ShardPlacementAccess *placementAccess = NULL; - foreach_ptr(placementAccess, placementAccessList) + foreach_declared_ptr(placementAccess, placementAccessList) { ShardPlacement *placement = placementAccess->placement; ShardPlacementAccessType accessType = placementAccess->accessType; diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index cbd74ff51b1..c9860c06185 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -392,7 +392,7 @@ void ExecuteCriticalRemoteCommandList(MultiConnection *connection, List *commandList) { const char *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { ExecuteCriticalRemoteCommand(connection, command); } @@ -435,7 +435,7 @@ ExecuteRemoteCommandInConnectionList(List *nodeConnectionList, const char *comma { MultiConnection *connection = NULL; - foreach_ptr(connection, nodeConnectionList) + foreach_declared_ptr(connection, nodeConnectionList) { int querySent = SendRemoteCommand(connection, command); @@ -446,7 +446,7 @@ ExecuteRemoteCommandInConnectionList(List *nodeConnectionList, const char *comma } /* Process the result */ - foreach_ptr(connection, nodeConnectionList) + foreach_declared_ptr(connection, nodeConnectionList) { bool raiseInterrupts = true; PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); @@ -887,7 +887,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) /* convert connection list to an array such that we can move items around */ MultiConnection *connectionItem = NULL; - foreach_ptr(connectionItem, connectionList) + foreach_declared_ptr(connectionItem, connectionList) { allConnections[connectionIndex] = connectionItem; connectionReady[connectionIndex] = false; @@ -1130,7 +1130,7 @@ BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount, /* allocate pending connections + 2 for the signal latch and postmaster death */ /* (CreateWaitEventSet makes room for pgwin32_signal_event automatically) */ - WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext, + WaitEventSet *waitEventSet = CreateWaitEventSet(WaitEventSetTracker_compat, pendingConnectionCount + 2); for (int connectionIndex = 0; connectionIndex < pendingConnectionCount; diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index f99462058d9..6e61564b80a 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -395,7 +395,8 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults if (attributeForm->attidentity && includeIdentityDefaults) { bool missing_ok = false; - Oid seqOid = getIdentitySequence(RelationGetRelid(relation), + Oid seqOid = getIdentitySequence(identitySequenceRelation_compat( + relation), attributeForm->attnum, missing_ok); if (includeIdentityDefaults == INCLUDE_IDENTITY) @@ -738,7 +739,12 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) * If the user changed the column's statistics target, create * alter statement and add statement to a list for later processing. 
*/ - if (attributeForm->attstattarget >= 0) + HeapTuple tp = SearchSysCache2(ATTNUM, + ObjectIdGetDatum(tableRelationId), + Int16GetDatum(attributeForm->attnum)); + int32 targetAttstattarget = getAttstattarget_compat(tp); + ReleaseSysCache(tp); + if (targetAttstattarget >= 0) { StringInfoData statement = { NULL, 0, 0, 0 }; initStringInfo(&statement); @@ -746,7 +752,7 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) appendStringInfo(&statement, "ALTER COLUMN %s ", quote_identifier(attributeName)); appendStringInfo(&statement, "SET STATISTICS %d", - attributeForm->attstattarget); + targetAttstattarget); columnOptionList = lappend(columnOptionList, statement.data); } @@ -938,7 +944,7 @@ bool IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param) { DefElem *opt = NULL; - foreach_ptr(opt, reindexStmt->params) + foreach_declared_ptr(opt, reindexStmt->params) { if (strcmp(opt->defname, param) == 0) { @@ -963,7 +969,7 @@ AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer) char *tableSpaceName = NULL; DefElem *opt = NULL; - foreach_ptr(opt, reindexStmt->params) + foreach_declared_ptr(opt, reindexStmt->params) { if (strcmp(opt->defname, "tablespace") == 0) { diff --git a/src/backend/distributed/deparser/deparse.c b/src/backend/distributed/deparser/deparse.c index 8312d64075c..9963a84f21a 100644 --- a/src/backend/distributed/deparser/deparse.c +++ b/src/backend/distributed/deparser/deparse.c @@ -47,7 +47,7 @@ DeparseTreeNodes(List *stmts) { List *sqls = NIL; Node *stmt = NULL; - foreach_ptr(stmt, stmts) + foreach_declared_ptr(stmt, stmts) { sqls = lappend(sqls, DeparseTreeNode(stmt)); } diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c index 66df5361e50..fb233ddfd68 100644 --- a/src/backend/distributed/deparser/deparse_database_stmts.c +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -174,7 +174,7 @@ static void AppendBasicAlterDatabaseOptions(StringInfo buf, AlterDatabaseStmt *stmt) { DefElem *def = NULL; - foreach_ptr(def, stmt->options) + foreach_declared_ptr(def, stmt->options) { DefElemOptionToStatement(buf, def, alterDatabaseOptionFormats, lengthof( alterDatabaseOptionFormats)); @@ -290,7 +290,7 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt) quote_identifier(stmt->dbname)); DefElem *option = NULL; - foreach_ptr(option, stmt->options) + foreach_declared_ptr(option, stmt->options) { DefElemOptionToStatement(buf, option, createDatabaseOptionFormats, lengthof(createDatabaseOptionFormats)); diff --git a/src/backend/distributed/deparser/deparse_domain_stmts.c b/src/backend/distributed/deparser/deparse_domain_stmts.c index e517074ecd4..9702eb3108e 100644 --- a/src/backend/distributed/deparser/deparse_domain_stmts.c +++ b/src/backend/distributed/deparser/deparse_domain_stmts.c @@ -70,7 +70,7 @@ DeparseCreateDomainStmt(Node *node) } Constraint *constraint = NULL; - foreach_ptr(constraint, stmt->constraints) + foreach_declared_ptr(constraint, stmt->constraints) { AppendConstraint(&buf, constraint, stmt->domainname, stmt->typeName); } @@ -117,7 +117,7 @@ DeparseDropDomainStmt(Node *node) TypeName *domainName = NULL; bool first = true; - foreach_ptr(domainName, stmt->objects) + foreach_declared_ptr(domainName, stmt->objects) { if (!first) { diff --git a/src/backend/distributed/deparser/deparse_extension_stmts.c b/src/backend/distributed/deparser/deparse_extension_stmts.c index 92d54602f06..256d22214ea 100644 --- 
a/src/backend/distributed/deparser/deparse_extension_stmts.c +++ b/src/backend/distributed/deparser/deparse_extension_stmts.c @@ -40,7 +40,7 @@ DefElem * GetExtensionOption(List *extensionOptions, const char *defname) { DefElem *defElement = NULL; - foreach_ptr(defElement, extensionOptions) + foreach_declared_ptr(defElement, extensionOptions) { if (IsA(defElement, DefElem) && strncmp(defElement->defname, defname, NAMEDATALEN) == 0) @@ -112,7 +112,7 @@ AppendCreateExtensionStmtOptions(StringInfo buf, List *options) /* Add the options to the statement */ DefElem *defElem = NULL; - foreach_ptr(defElem, options) + foreach_declared_ptr(defElem, options) { if (strcmp(defElem->defname, "schema") == 0) { @@ -181,7 +181,7 @@ AppendAlterExtensionStmt(StringInfo buf, AlterExtensionStmt *alterExtensionStmt) * the options. */ DefElem *option = NULL; - foreach_ptr(option, optionsList) + foreach_declared_ptr(option, optionsList) { if (strcmp(option->defname, "new_version") == 0) { diff --git a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c index 9c708a771a7..6b278f757a7 100644 --- a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c +++ b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c @@ -176,7 +176,7 @@ AppendAlterForeignServerOptions(StringInfo buf, AlterForeignServerStmt *stmt) DefElemAction action = DEFELEM_UNSPEC; DefElem *def = NULL; - foreach_ptr(def, stmt->options) + foreach_declared_ptr(def, stmt->options) { if (def->defaction != DEFELEM_UNSPEC) { @@ -242,7 +242,7 @@ static void AppendServerNames(StringInfo buf, DropStmt *stmt) { String *serverValue = NULL; - foreach_ptr(serverValue, stmt->objects) + foreach_declared_ptr(serverValue, stmt->objects) { const char *serverString = quote_identifier(strVal(serverValue)); appendStringInfo(buf, "%s", serverString); diff --git a/src/backend/distributed/deparser/deparse_publication_stmts.c b/src/backend/distributed/deparser/deparse_publication_stmts.c index 8e311817199..35068266ecb 100644 --- a/src/backend/distributed/deparser/deparse_publication_stmts.c +++ b/src/backend/distributed/deparser/deparse_publication_stmts.c @@ -118,7 +118,7 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt, * Check whether there are objects to propagate, mainly to know whether * we should include "FOR". */ - foreach_ptr(publicationObject, stmt->pubobjects) + foreach_declared_ptr(publicationObject, stmt->pubobjects) { if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE) { @@ -156,7 +156,7 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt, * Check whether there are tables to propagate, mainly to know whether * we should include "FOR". 
*/ - foreach_ptr(rangeVar, stmt->tables) + foreach_declared_ptr(rangeVar, stmt->tables) { if (includeLocalTables || IsCitusTableRangeVar(rangeVar, NoLock, false)) { @@ -198,7 +198,7 @@ AppendPublicationObjects(StringInfo buf, List *publicationObjects, PublicationObjSpec *publicationObject = NULL; bool appendedObject = false; - foreach_ptr(publicationObject, publicationObjects) + foreach_declared_ptr(publicationObject, publicationObjects) { if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE) { @@ -334,7 +334,7 @@ AppendTables(StringInfo buf, List *tables, bool includeLocalTables) RangeVar *rangeVar = NULL; bool appendedObject = false; - foreach_ptr(rangeVar, tables) + foreach_declared_ptr(rangeVar, tables) { if (!includeLocalTables && !IsCitusTableRangeVar(rangeVar, NoLock, false)) diff --git a/src/backend/distributed/deparser/deparse_role_stmts.c b/src/backend/distributed/deparser/deparse_role_stmts.c index a4a085026c9..61c0be24670 100644 --- a/src/backend/distributed/deparser/deparse_role_stmts.c +++ b/src/backend/distributed/deparser/deparse_role_stmts.c @@ -404,7 +404,7 @@ AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt) if (!stmt->is_grant) { DefElem *opt = NULL; - foreach_ptr(opt, stmt->opt) + foreach_declared_ptr(opt, stmt->opt) { if (strcmp(opt->defname, "admin") == 0) { @@ -440,7 +440,7 @@ AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt) #if PG_VERSION_NUM >= PG_VERSION_16 int opt_count = 0; DefElem *opt = NULL; - foreach_ptr(opt, stmt->opt) + foreach_declared_ptr(opt, stmt->opt) { char *optval = defGetString(opt); bool option_value = false; diff --git a/src/backend/distributed/deparser/deparse_schema_stmts.c b/src/backend/distributed/deparser/deparse_schema_stmts.c index 0a9c49801aa..50e3974c0f2 100644 --- a/src/backend/distributed/deparser/deparse_schema_stmts.c +++ b/src/backend/distributed/deparser/deparse_schema_stmts.c @@ -152,7 +152,7 @@ AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt) } String *schemaValue = NULL; - foreach_ptr(schemaValue, stmt->objects) + foreach_declared_ptr(schemaValue, stmt->objects) { const char *schemaString = quote_identifier(strVal(schemaValue)); appendStringInfo(buf, "%s", schemaString); diff --git a/src/backend/distributed/deparser/deparse_statistics_stmts.c b/src/backend/distributed/deparser/deparse_statistics_stmts.c index 99b9d1c2ddf..79be835b93b 100644 --- a/src/backend/distributed/deparser/deparse_statistics_stmts.c +++ b/src/backend/distributed/deparser/deparse_statistics_stmts.c @@ -177,8 +177,9 @@ AppendAlterStatisticsSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt) static void AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt) { - appendStringInfo(buf, "ALTER STATISTICS %s SET STATISTICS %d", NameListToQuotedString( - stmt->defnames), stmt->stxstattarget); + appendStringInfo(buf, "ALTER STATISTICS %s SET STATISTICS %d", + NameListToQuotedString(stmt->defnames), + getIntStxstattarget_compat(stmt->stxstattarget)); } @@ -216,7 +217,7 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt) appendStringInfoString(buf, " ("); String *statType = NULL; - foreach_ptr(statType, stmt->stat_types) + foreach_declared_ptr(statType, stmt->stat_types) { appendStringInfoString(buf, strVal(statType)); @@ -235,7 +236,7 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt) { StatsElem *column = NULL; - foreach_ptr(column, stmt->exprs) + foreach_declared_ptr(column, stmt->exprs) { if (!column->name) { diff --git a/src/backend/distributed/deparser/deparse_text_search.c 
b/src/backend/distributed/deparser/deparse_text_search.c index ab5498ad81a..2ca09b8efc5 100644 --- a/src/backend/distributed/deparser/deparse_text_search.c +++ b/src/backend/distributed/deparser/deparse_text_search.c @@ -86,7 +86,7 @@ AppendDefElemList(StringInfo buf, List *defelems, char *objectName) { DefElem *defelem = NULL; bool first = true; - foreach_ptr(defelem, defelems) + foreach_declared_ptr(defelem, defelems) { if (!first) { @@ -133,7 +133,7 @@ DeparseDropTextSearchConfigurationStmt(Node *node) appendStringInfoString(&buf, "DROP TEXT SEARCH CONFIGURATION "); List *nameList = NIL; bool first = true; - foreach_ptr(nameList, stmt->objects) + foreach_declared_ptr(nameList, stmt->objects) { if (!first) { @@ -171,7 +171,7 @@ DeparseDropTextSearchDictionaryStmt(Node *node) appendStringInfoString(&buf, "DROP TEXT SEARCH DICTIONARY "); List *nameList = NIL; bool first = true; - foreach_ptr(nameList, stmt->objects) + foreach_declared_ptr(nameList, stmt->objects) { if (!first) { @@ -404,7 +404,7 @@ AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes) { String *tokentype = NULL; bool first = true; - foreach_ptr(tokentype, tokentypes) + foreach_declared_ptr(tokentype, tokentypes) { if (nodeTag(tokentype) != T_String) { @@ -432,7 +432,7 @@ AppendStringInfoDictnames(StringInfo buf, List *dicts) { List *dictNames = NIL; bool first = true; - foreach_ptr(dictNames, dicts) + foreach_declared_ptr(dictNames, dicts) { if (!first) { diff --git a/src/backend/distributed/deparser/deparse_view_stmts.c b/src/backend/distributed/deparser/deparse_view_stmts.c index 5592aec9d53..2e046c0994e 100644 --- a/src/backend/distributed/deparser/deparse_view_stmts.c +++ b/src/backend/distributed/deparser/deparse_view_stmts.c @@ -88,7 +88,7 @@ AppendViewNameList(StringInfo buf, List *viewNamesList) { bool isFirstView = true; List *qualifiedViewName = NULL; - foreach_ptr(qualifiedViewName, viewNamesList) + foreach_declared_ptr(qualifiedViewName, viewNamesList) { char *quotedQualifiedVieName = NameListToQuotedString(qualifiedViewName); if (!isFirstView) diff --git a/src/backend/distributed/deparser/qualify_collation_stmt.c b/src/backend/distributed/deparser/qualify_collation_stmt.c index dad3b7a0eca..36bde695766 100644 --- a/src/backend/distributed/deparser/qualify_collation_stmt.c +++ b/src/backend/distributed/deparser/qualify_collation_stmt.c @@ -83,7 +83,7 @@ QualifyDropCollationStmt(Node *node) List *names = NIL; List *name = NIL; - foreach_ptr(name, stmt->objects) + foreach_declared_ptr(name, stmt->objects) { names = lappend(names, QualifyCollationName(name)); } diff --git a/src/backend/distributed/deparser/qualify_domain.c b/src/backend/distributed/deparser/qualify_domain.c index 2e163dad072..acf48e6ffb0 100644 --- a/src/backend/distributed/deparser/qualify_domain.c +++ b/src/backend/distributed/deparser/qualify_domain.c @@ -67,7 +67,7 @@ QualifyDropDomainStmt(Node *node) DropStmt *stmt = castNode(DropStmt, node); TypeName *domainName = NULL; - foreach_ptr(domainName, stmt->objects) + foreach_declared_ptr(domainName, stmt->objects) { QualifyTypeName(domainName, stmt->missing_ok); } @@ -249,7 +249,7 @@ QualifyCollate(CollateClause *collClause, bool missing_ok) collClause->collname = NIL; char *name = NULL; - foreach_ptr(name, objName) + foreach_declared_ptr(name, objName) { collClause->collname = lappend(collClause->collname, makeString(name)); } diff --git a/src/backend/distributed/deparser/qualify_publication_stmt.c b/src/backend/distributed/deparser/qualify_publication_stmt.c index 
73ffe3a3533..c47f52e159c 100644 --- a/src/backend/distributed/deparser/qualify_publication_stmt.c +++ b/src/backend/distributed/deparser/qualify_publication_stmt.c @@ -55,7 +55,7 @@ QualifyPublicationObjects(List *publicationObjects) { PublicationObjSpec *publicationObject = NULL; - foreach_ptr(publicationObject, publicationObjects) + foreach_declared_ptr(publicationObject, publicationObjects) { if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE) { @@ -78,7 +78,7 @@ QualifyTables(List *tables) { RangeVar *rangeVar = NULL; - foreach_ptr(rangeVar, tables) + foreach_declared_ptr(rangeVar, tables) { QualifyPublicationRangeVar(rangeVar); } diff --git a/src/backend/distributed/deparser/qualify_sequence_stmt.c b/src/backend/distributed/deparser/qualify_sequence_stmt.c index 1a0ecc8319b..c56d0fda0bb 100644 --- a/src/backend/distributed/deparser/qualify_sequence_stmt.c +++ b/src/backend/distributed/deparser/qualify_sequence_stmt.c @@ -148,7 +148,7 @@ QualifyDropSequenceStmt(Node *node) List *objectNameListWithSchema = NIL; List *objectNameList = NULL; - foreach_ptr(objectNameList, stmt->objects) + foreach_declared_ptr(objectNameList, stmt->objects) { RangeVar *seq = makeRangeVarFromNameList(objectNameList); @@ -192,7 +192,7 @@ QualifyGrantOnSequenceStmt(Node *node) } List *qualifiedSequenceRangeVars = NIL; RangeVar *sequenceRangeVar = NULL; - foreach_ptr(sequenceRangeVar, stmt->objects) + foreach_declared_ptr(sequenceRangeVar, stmt->objects) { if (sequenceRangeVar->schemaname == NULL) { diff --git a/src/backend/distributed/deparser/qualify_statistics_stmt.c b/src/backend/distributed/deparser/qualify_statistics_stmt.c index ba8e8a76496..7a99e6dfad3 100644 --- a/src/backend/distributed/deparser/qualify_statistics_stmt.c +++ b/src/backend/distributed/deparser/qualify_statistics_stmt.c @@ -73,7 +73,7 @@ QualifyDropStatisticsStmt(Node *node) List *objectNameListWithSchema = NIL; List *objectNameList = NULL; - foreach_ptr(objectNameList, dropStatisticsStmt->objects) + foreach_declared_ptr(objectNameList, dropStatisticsStmt->objects) { RangeVar *stat = makeRangeVarFromNameList(objectNameList); diff --git a/src/backend/distributed/deparser/qualify_text_search_stmts.c b/src/backend/distributed/deparser/qualify_text_search_stmts.c index df1e140a421..451cb2fb06d 100644 --- a/src/backend/distributed/deparser/qualify_text_search_stmts.c +++ b/src/backend/distributed/deparser/qualify_text_search_stmts.c @@ -46,7 +46,7 @@ QualifyDropTextSearchConfigurationStmt(Node *node) List *qualifiedObjects = NIL; List *objName = NIL; - foreach_ptr(objName, stmt->objects) + foreach_declared_ptr(objName, stmt->objects) { char *schemaName = NULL; char *tsconfigName = NULL; @@ -87,7 +87,7 @@ QualifyDropTextSearchDictionaryStmt(Node *node) List *qualifiedObjects = NIL; List *objName = NIL; - foreach_ptr(objName, stmt->objects) + foreach_declared_ptr(objName, stmt->objects) { char *schemaName = NULL; char *tsdictName = NULL; @@ -141,7 +141,7 @@ QualifyAlterTextSearchConfigurationStmt(Node *node) bool useNewDicts = false; List *dicts = NULL; List *dictName = NIL; - foreach_ptr(dictName, stmt->dicts) + foreach_declared_ptr(dictName, stmt->dicts) { DeconstructQualifiedName(dictName, &schemaName, &objName); diff --git a/src/backend/distributed/deparser/qualify_view_stmt.c b/src/backend/distributed/deparser/qualify_view_stmt.c index af3fb280a8c..4f4daf71e8b 100644 --- a/src/backend/distributed/deparser/qualify_view_stmt.c +++ b/src/backend/distributed/deparser/qualify_view_stmt.c @@ -31,7 +31,7 @@ QualifyDropViewStmt(Node *node) 
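Aside on the mechanical renames above (illustrative, not part of the patch): foreach_ptr becomes foreach_declared_ptr throughout the deparser and qualifier files, presumably because PostgreSQL 17 adds its own foreach_ptr/foreach_int convenience macros to pg_list.h, which would collide with the same-named macros Citus has long carried in its listutils.h. The new name reflects that the Citus variant iterates over an already-declared loop variable, whereas the PG17 macro declares the variable itself from a type argument. As a rough sketch only (the real definition lives in src/include/distributed/listutils.h and may differ in detail):

    /* assumed shape of the renamed macro, for illustration only */
    #define foreach_declared_ptr(var, lst) \
        for (ListCell *var##CellDoNotUse = list_head(lst); \
             var##CellDoNotUse != NULL && \
             (((var) = lfirst(var##CellDoNotUse)) || true); \
             var##CellDoNotUse = lnext((lst), var##CellDoNotUse))

Because only the macro name changes, every call site keeps its existing "DefElem *opt = NULL;"-style declaration and its loop body untouched.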
List *qualifiedViewNames = NIL; + List *possiblyQualifiedViewName = NULL; - foreach_ptr(possiblyQualifiedViewName, stmt->objects) + foreach_declared_ptr(possiblyQualifiedViewName, stmt->objects) { char *viewName = NULL; char *schemaName = NULL; diff --git a/src/backend/distributed/deparser/ruleutils_17.c b/src/backend/distributed/deparser/ruleutils_17.c new file mode 100644 index 00000000000..fa2a854b068 --- /dev/null +++ b/src/backend/distributed/deparser/ruleutils_17.c @@ -0,0 +1,9880 @@ +/*------------------------------------------------------------------------- + * + * ruleutils_17.c + * Functions to convert stored expressions/querytrees back to + * source text + * + * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/distributed/deparser/ruleutils_17.c + * + * This needs to be closely in sync with the core code. + *------------------------------------------------------------------------- + */ +#include "pg_version_constants.h" + +#include "pg_config.h" + +#if (PG_VERSION_NUM >= PG_VERSION_17) && (PG_VERSION_NUM < PG_VERSION_18) + +#include "postgres.h" + +#include <ctype.h> +#include <unistd.h> +#include <fcntl.h> + +#include "access/amapi.h" +#include "access/htup_details.h" +#include "access/relation.h" +#include "access/table.h" +#include "catalog/pg_aggregate.h" +#include "catalog/pg_am.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_constraint.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_extension.h" +#include "catalog/pg_foreign_data_wrapper.h" +#include "catalog/pg_language.h" +#include "catalog/pg_opclass.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_partitioned_table.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_statistic_ext.h" +#include "catalog/pg_trigger.h" +#include "catalog/pg_type.h" +#include "commands/defrem.h" +#include "commands/extension.h" +#include "commands/tablespace.h" +#include "common/keywords.h" +#include "distributed/citus_nodefuncs.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/multi_router_planner.h" +#include "distributed/namespace_utils.h" +#include "executor/spi.h" +#include "foreign/foreign.h" +#include "funcapi.h" +#include "mb/pg_wchar.h" +#include "miscadmin.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/pathnodes.h" +#include "optimizer/optimizer.h" +#include "parser/parse_node.h" +#include "parser/parse_agg.h" +#include "parser/parse_func.h" +#include "parser/parse_node.h" +#include "parser/parse_oper.h" +#include "parser/parse_relation.h" +#include "parser/parser.h" +#include "parser/parsetree.h" +#include "rewrite/rewriteHandler.h" +#include "rewrite/rewriteManip.h" +#include "rewrite/rewriteSupport.h" +#include "utils/array.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/hsearch.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/ruleutils.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" +#include "utils/typcache.h" +#include "utils/varlena.h" +#include "utils/xml.h" + + +/* ---------- + * Pretty formatting constants + * ---------- + */ + +/* Indent counts */ +#define PRETTYINDENT_STD 8 +#define PRETTYINDENT_JOIN 4 +#define PRETTYINDENT_VAR 4 + +#define PRETTYINDENT_LIMIT 40 /* wrap limit */ + +/* Pretty flags */ +#define PRETTYFLAG_PAREN 0x0001 +#define PRETTYFLAG_INDENT 0x0002 + +/* Default line length for pretty-print wrapping: 0 means wrap
always */ +#define WRAP_COLUMN_DEFAULT 0 + +/* macros to test if pretty action needed */ +#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN) +#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT) + + +/* ---------- + * Local data types + * ---------- + */ + +/* Context info needed for invoking a recursive querytree display routine */ +typedef struct +{ + StringInfo buf; /* output buffer to append to */ + List *namespaces; /* List of deparse_namespace nodes */ + List *windowClause; /* Current query level's WINDOW clause */ + List *windowTList; /* targetlist for resolving WINDOW clause */ + int prettyFlags; /* enabling of pretty-print functions */ + int wrapColumn; /* max line length, or -1 for no limit */ + int indentLevel; /* current indent level for prettyprint */ + bool varprefix; /* true to print prefixes on Vars */ + Oid distrelid; /* the distributed table being modified, if valid */ + int64 shardid; /* a distributed table's shardid, if positive */ + ParseExprKind special_exprkind; /* set only for exprkinds needing special + * handling */ + Bitmapset *appendparents; /* if not null, map child Vars of these relids + * back to the parent rel */ +} deparse_context; + +/* + * Each level of query context around a subtree needs a level of Var namespace. + * A Var having varlevelsup=N refers to the N'th item (counting from 0) in + * the current context's namespaces list. + * + * The rangetable is the list of actual RTEs from the query tree, and the + * cte list is the list of actual CTEs. + * + * rtable_names holds the alias name to be used for each RTE (either a C + * string, or NULL for nameless RTEs such as unnamed joins). + * rtable_columns holds the column alias names to be used for each RTE. + * + * In some cases we need to make names of merged JOIN USING columns unique + * across the whole query, not only per-RTE. If so, unique_using is true + * and using_names is a list of C strings representing names already assigned + * to USING columns. + * + * When deparsing plan trees, there is always just a single item in the + * deparse_namespace list (since a plan tree never contains Vars with + * varlevelsup > 0). We store the PlanState node that is the immediate + * parent of the expression to be deparsed, as well as a list of that + * PlanState's ancestors. In addition, we store its outer and inner subplan + * state nodes, as well as their plan nodes' targetlists, and the index tlist + * if the current plan node might contain INDEX_VAR Vars. (These fields could + * be derived on-the-fly from the current PlanState, but it seems notationally + * clearer to set them up as separate fields.) 
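To make the namespace rules above concrete (illustrative sketch, not part of the patch): a Var with varlevelsup = N is looked up in the N'th deparse_namespace of context->namespaces, and its varno then indexes that namespace's range table. The real lookup is get_variable() further down in this file, which additionally handles OUTER_VAR/INNER_VAR/INDEX_VAR and appendrel translation; stripped to just the namespace walk, it amounts to:

    /* illustrative sketch only -- see get_variable() for the real logic */
    static RangeTblEntry *
    rte_for_var_sketch(Var *var, deparse_context *context)
    {
        /* pick the namespace of the query level this Var belongs to */
        deparse_namespace *dpns = (deparse_namespace *)
            list_nth(context->namespaces, var->varlevelsup);

        /* varno is a 1-based index into that level's range table */
        return rt_fetch(var->varno, dpns->rtable);
    }

rt_fetch() and list_nth() come from parser/parsetree.h and nodes/pg_list.h, both already included above.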
+ */ +typedef struct +{ + List *rtable; /* List of RangeTblEntry nodes */ + List *rtable_names; /* Parallel list of names for RTEs */ + List *rtable_columns; /* Parallel list of deparse_columns structs */ + List *subplans; /* List of Plan trees for SubPlans */ + List *ctes; /* List of CommonTableExpr nodes */ + AppendRelInfo **appendrels; /* Array of AppendRelInfo nodes, or NULL */ + /* Workspace for column alias assignment: */ + bool unique_using; /* Are we making USING names globally unique */ + List *using_names; /* List of assigned names for USING columns */ + /* Remaining fields are used only when deparsing a Plan tree: */ + Plan *plan; /* immediate parent of current expression */ + List *ancestors; /* ancestors of planstate */ + Plan *outer_plan; /* outer subnode, or NULL if none */ + Plan *inner_plan; /* inner subnode, or NULL if none */ + List *outer_tlist; /* referent for OUTER_VAR Vars */ + List *inner_tlist; /* referent for INNER_VAR Vars */ + List *index_tlist; /* referent for INDEX_VAR Vars */ + /* Special namespace representing a function signature: */ + char *funcname; + int numargs; + char **argnames; +} deparse_namespace; + +/* Callback signature for resolve_special_varno() */ +typedef void (*rsv_callback) (Node *node, deparse_context *context, + void *callback_arg); + +/* + * Per-relation data about column alias names. + * + * Selecting aliases is unreasonably complicated because of the need to dump + * rules/views whose underlying tables may have had columns added, deleted, or + * renamed since the query was parsed. We must nonetheless print the rule/view + * in a form that can be reloaded and will produce the same results as before. + * + * For each RTE used in the query, we must assign column aliases that are + * unique within that RTE. SQL does not require this of the original query, + * but due to factors such as *-expansion we need to be able to uniquely + * reference every column in a decompiled query. As long as we qualify all + * column references, per-RTE uniqueness is sufficient for that. + * + * However, we can't ensure per-column name uniqueness for unnamed join RTEs, + * since they just inherit column names from their input RTEs, and we can't + * rename the columns at the join level. Most of the time this isn't an issue + * because we don't need to reference the join's output columns as such; we + * can reference the input columns instead. That approach can fail for merged + * JOIN USING columns, however, so when we have one of those in an unnamed + * join, we have to make that column's alias globally unique across the whole + * query to ensure it can be referenced unambiguously. + * + * Another problem is that a JOIN USING clause requires the columns to be + * merged to have the same aliases in both input RTEs, and that no other + * columns in those RTEs or their children conflict with the USING names. + * To handle that, we do USING-column alias assignment in a recursive + * traversal of the query's jointree. When descending through a JOIN with + * USING, we preassign the USING column names to the child columns, overriding + * other rules for column alias assignment. We also mark each RTE with a list + * of all USING column names selected for joins containing that RTE, so that + * when we assign other columns' aliases later, we can avoid conflicts. 
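Related sketch (illustrative, not part of the patch): once the alias-assignment passes described here have run, printing a column reference mostly reduces to indexing the per-RTE colnames array (part of the deparse_columns struct defined just below) by the Var's attribute number:

    /* illustrative sketch only -- dropped columns and join Vars need more care */
    static const char *
    column_alias_sketch(deparse_namespace *dpns, Var *var)
    {
        /* fetch this RTE's deparse_columns via the macro defined below ... */
        deparse_columns *colinfo = deparse_columns_fetch(var->varno, dpns);

        /* ... and index it by varattno; NULL would mean a dropped column */
        return colinfo->colnames[var->varattno - 1];
    }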
+ * + * Another problem is that if a JOIN's input tables have had columns added or + * deleted since the query was parsed, we must generate a column alias list + * for the join that matches the current set of input columns --- otherwise, a + * change in the number of columns in the left input would throw off matching + * of aliases to columns of the right input. Thus, positions in the printable + * column alias list are not necessarily one-for-one with varattnos of the + * JOIN, so we need a separate new_colnames[] array for printing purposes. + */ +typedef struct +{ + /* + * colnames is an array containing column aliases to use for columns that + * existed when the query was parsed. Dropped columns have NULL entries. + * This array can be directly indexed by varattno to get a Var's name. + * + * Non-NULL entries are guaranteed unique within the RTE, *except* when + * this is for an unnamed JOIN RTE. In that case we merely copy up names + * from the two input RTEs. + * + * During the recursive descent in set_using_names(), forcible assignment + * of a child RTE's column name is represented by pre-setting that element + * of the child's colnames array. So at that stage, NULL entries in this + * array just mean that no name has been preassigned, not necessarily that + * the column is dropped. + */ + int num_cols; /* length of colnames[] array */ + char **colnames; /* array of C strings and NULLs */ + + /* + * new_colnames is an array containing column aliases to use for columns + * that would exist if the query was re-parsed against the current + * definitions of its base tables. This is what to print as the column + * alias list for the RTE. This array does not include dropped columns, + * but it will include columns added since original parsing. Indexes in + * it therefore have little to do with current varattno values. As above, + * entries are unique unless this is for an unnamed JOIN RTE. (In such an + * RTE, we never actually print this array, but we must compute it anyway + * for possible use in computing column names of upper joins.) The + * parallel array is_new_col marks which of these columns are new since + * original parsing. Entries with is_new_col false must match the + * non-NULL colnames entries one-for-one. + */ + int num_new_cols; /* length of new_colnames[] array */ + char **new_colnames; /* array of C strings */ + bool *is_new_col; /* array of bool flags */ + + /* This flag tells whether we should actually print a column alias list */ + bool printaliases; + + /* This list has all names used as USING names in joins above this RTE */ + List *parentUsing; /* names assigned to parent merged columns */ + + /* + * If this struct is for a JOIN RTE, we fill these fields during the + * set_using_names() pass to describe its relationship to its child RTEs. + * + * leftattnos and rightattnos are arrays with one entry per existing + * output column of the join (hence, indexable by join varattno). For a + * simple reference to a column of the left child, leftattnos[i] is the + * child RTE's attno and rightattnos[i] is zero; and conversely for a + * column of the right child. But for merged columns produced by JOIN + * USING/NATURAL JOIN, both leftattnos[i] and rightattnos[i] are nonzero. + * Also, if the column has been dropped, both are zero. + * + * If it's a JOIN USING, usingNames holds the alias names selected for the + * merged columns (these might be different from the original USING list, + * if we had to modify names to achieve uniqueness). 
+ */ + int leftrti; /* rangetable index of left child */ + int rightrti; /* rangetable index of right child */ + int *leftattnos; /* left-child varattnos of join cols, or 0 */ + int *rightattnos; /* right-child varattnos of join cols, or 0 */ + List *usingNames; /* names assigned to merged columns */ +} deparse_columns; + +/* This macro is analogous to rt_fetch(), but for deparse_columns structs */ +#define deparse_columns_fetch(rangetable_index, dpns) \ + ((deparse_columns *) list_nth((dpns)->rtable_columns, (rangetable_index)-1)) + +/* + * Entry in set_rtable_names' hash table + */ +typedef struct +{ + char name[NAMEDATALEN]; /* Hash key --- must be first */ + int counter; /* Largest addition used so far for name */ +} NameHashEntry; + + +/* ---------- + * Local functions + * + * Most of these functions used to use fixed-size buffers to build their + * results. Now, they take an (already initialized) StringInfo object + * as a parameter, and append their text output to its contents. + * ---------- + */ +static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, + Bitmapset *rels_used); +static void set_deparse_for_query(deparse_namespace *dpns, Query *query, + List *parent_namespaces); +static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode); +static void set_using_names(deparse_namespace *dpns, Node *jtnode, + List *parentUsing); +static void set_relation_column_names(deparse_namespace *dpns, + RangeTblEntry *rte, + deparse_columns *colinfo); +static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, + deparse_columns *colinfo); +static bool colname_is_unique(const char *colname, deparse_namespace *dpns, + deparse_columns *colinfo); +static char *make_colname_unique(char *colname, deparse_namespace *dpns, + deparse_columns *colinfo); +static void expand_colnames_array_to(deparse_columns *colinfo, int n); +static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, + deparse_columns *colinfo); +static char *get_rtable_name(int rtindex, deparse_context *context); +static void set_deparse_plan(deparse_namespace *dpns, Plan *plan); +static Plan *find_recursive_union(deparse_namespace *dpns, + WorkTableScan *wtscan); +static void push_child_plan(deparse_namespace *dpns, Plan *plan, + deparse_namespace *save_dpns); +static void pop_child_plan(deparse_namespace *dpns, + deparse_namespace *save_dpns); +static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, + deparse_namespace *save_dpns); +static void pop_ancestor_plan(deparse_namespace *dpns, + deparse_namespace *save_dpns); +static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, + TupleDesc resultDesc, bool colNamesVisible, + int prettyFlags, int wrapColumn, int startIndent); +static void get_query_def_extended(Query *query, StringInfo buf, + List *parentnamespace, Oid distrelid, int64 shardid, + TupleDesc resultDesc, bool colNamesVisible, + int prettyFlags, int wrapColumn, + int startIndent); +static void get_values_def(List *values_lists, deparse_context *context); +static void get_with_clause(Query *query, deparse_context *context); +static void get_select_query_def(Query *query, deparse_context *context, + TupleDesc resultDesc, bool colNamesVisible); +static void get_insert_query_def(Query *query, deparse_context *context, + bool colNamesVisible); +static void get_update_query_def(Query *query, deparse_context *context, + bool colNamesVisible); +static void get_update_query_targetlist_def(Query *query, List *targetList, + 
deparse_context *context, + RangeTblEntry *rte); +static void get_delete_query_def(Query *query, deparse_context *context, + bool colNamesVisible); +static void get_merge_query_def(Query *query, deparse_context *context, + bool colNamesVisible); +static void get_utility_query_def(Query *query, deparse_context *context); +static void get_basic_select_query(Query *query, deparse_context *context, + TupleDesc resultDesc, bool colNamesVisible); +static void get_target_list(List *targetList, deparse_context *context, + TupleDesc resultDesc, bool colNamesVisible); +static void get_setop_query(Node *setOp, Query *query, + deparse_context *context, + TupleDesc resultDesc, bool colNamesVisible); +static Node *get_rule_sortgroupclause(Index ref, List *tlist, + bool force_colno, + deparse_context *context); +static void get_rule_groupingset(GroupingSet *gset, List *targetlist, + bool omit_parens, deparse_context *context); +static void get_rule_orderby(List *orderList, List *targetList, + bool force_colno, deparse_context *context); +static void get_rule_windowclause(Query *query, deparse_context *context); +static void get_rule_windowspec(WindowClause *wc, List *targetList, + deparse_context *context); +static char *get_variable(Var *var, int levelsup, bool istoplevel, + deparse_context *context); +static void get_special_variable(Node *node, deparse_context *context, + void *callback_arg); +static void resolve_special_varno(Node *node, deparse_context *context, + rsv_callback callback, void *callback_arg); +static Node *find_param_referent(Param *param, deparse_context *context, + deparse_namespace **dpns_p, ListCell **ancestor_cell_p); +static SubPlan *find_param_generator(Param *param, deparse_context *context, + int *column_p); +static SubPlan *find_param_generator_initplan(Param *param, Plan *plan, + int *column_p); +static void get_parameter(Param *param, deparse_context *context); +static const char *get_simple_binary_op_name(OpExpr *expr); +static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags); +static void appendContextKeyword(deparse_context *context, const char *str, + int indentBefore, int indentAfter, int indentPlus); +static void removeStringInfoSpaces(StringInfo str); +static void get_rule_expr(Node *node, deparse_context *context, + bool showimplicit); +static void get_rule_expr_toplevel(Node *node, deparse_context *context, + bool showimplicit); +static void get_rule_list_toplevel(List *lst, deparse_context *context, + bool showimplicit); +static void get_rule_expr_funccall(Node *node, deparse_context *context, + bool showimplicit); +static bool looks_like_function(Node *node); +static void get_oper_expr(OpExpr *expr, deparse_context *context); +static void get_func_expr(FuncExpr *expr, deparse_context *context, + bool showimplicit); +static void get_proc_expr(CallStmt *stmt, deparse_context *context, + bool showimplicit); +static void get_agg_expr(Aggref *aggref, deparse_context *context, + Aggref *original_aggref); +static void get_agg_expr_helper(Aggref *aggref, deparse_context *context, + Aggref *original_aggref, const char *funcname, + const char *options, bool is_json_objectagg); +static void get_agg_combine_expr(Node *node, deparse_context *context, + void *callback_arg); +static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context); +static void get_windowfunc_expr_helper(WindowFunc *wfunc, deparse_context *context, + const char *funcname, const char *options, + bool is_json_objectagg); +static bool get_func_sql_syntax(FuncExpr *expr, 
deparse_context *context); +static void get_coercion_expr(Node *arg, deparse_context *context, + Oid resulttype, int32 resulttypmod, + Node *parentNode); +static void get_const_expr(Const *constval, deparse_context *context, + int showtype); +static void get_const_collation(Const *constval, deparse_context *context); +static void get_json_format(JsonFormat *format, StringInfo buf); +static void get_json_returning(JsonReturning *returning, StringInfo buf, + bool json_format_by_default); +static void get_json_constructor(JsonConstructorExpr *ctor, + deparse_context *context, bool showimplicit); +static void get_json_constructor_options(JsonConstructorExpr *ctor, + StringInfo buf); +static void get_json_agg_constructor(JsonConstructorExpr *ctor, + deparse_context *context, + const char *funcname, + bool is_json_objectagg); +static void simple_quote_literal(StringInfo buf, const char *val); +static void get_sublink_expr(SubLink *sublink, deparse_context *context); +static void get_tablefunc(TableFunc *tf, deparse_context *context, + bool showimplicit); +static void get_from_clause(Query *query, const char *prefix, + deparse_context *context); +static void get_from_clause_item(Node *jtnode, Query *query, + deparse_context *context); +static void get_rte_alias(RangeTblEntry *rte, int varno, bool use_as, + deparse_context *context); +static void get_column_alias_list(deparse_columns *colinfo, + deparse_context *context); +static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, + deparse_columns *colinfo, + deparse_context *context); +static void get_tablesample_def(TableSampleClause *tablesample, + deparse_context *context); +static void get_opclass_name(Oid opclass, Oid actual_datatype, + StringInfo buf); +static Node *processIndirection(Node *node, deparse_context *context); +static void printSubscripts(SubscriptingRef *aref, deparse_context *context); +static char *get_relation_name(Oid relid); +static char *generate_relation_or_shard_name(Oid relid, Oid distrelid, + int64 shardid, List *namespaces); +static char *generate_rte_shard_name(RangeTblEntry *rangeTableEntry); +static char *generate_fragment_name(char *schemaName, char *tableName); +static char *generate_function_name(Oid funcid, int nargs, + List *argnames, Oid *argtypes, + bool has_variadic, bool *use_variadic_p, + ParseExprKind special_exprkind); +static List *get_insert_column_names_list(List *targetList, StringInfo buf, deparse_context *context, RangeTblEntry *rte); +static void get_json_path_spec(Node *path_spec, deparse_context *context, + bool showimplicit); +static void get_json_table_columns(TableFunc *tf, JsonTablePathScan *scan, + deparse_context *context, + bool showimplicit); +static void get_json_table_nested_columns(TableFunc *tf, JsonTablePlan *plan, + deparse_context *context, + bool showimplicit, + bool needcomma); + +#define only_marker(rte) ((rte)->inh ? "" : "ONLY ") + + + +/* + * pg_get_query_def parses back one query tree, and outputs the resulting query + * string into given buffer. + */ +void +pg_get_query_def(Query *query, StringInfo buffer) +{ + get_query_def(query, buffer, NIL, NULL, false, 0, WRAP_COLUMN_DEFAULT, 0); +} + +/* + * get_merged_argument_list merges both the IN and OUT arguments lists into one and + * also eliminates the INOUT duplicates(present in both the lists). 
After merging both + * the lists, it returns all the named-arguments in a list(mergedNamedArgList) along + * with their types(mergedNamedArgTypes), final argument list(mergedArgumentList), and + * the total number of arguments(totalArguments). + */ +bool +get_merged_argument_list(CallStmt *stmt, List **mergedNamedArgList, + Oid **mergedNamedArgTypes, + List **mergedArgumentList, + int *totalArguments) +{ + + Oid functionOid = stmt->funcexpr->funcid; + List *namedArgList = NIL; + List *finalArgumentList = NIL; + Oid *finalArgTypes; + Oid *argTypes = NULL; + char *argModes = NULL; + char **argNames = NULL; + int argIndex = 0; + + HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionOid)); + if (!HeapTupleIsValid(proctup)) + { + elog(ERROR, "cache lookup failed for function %u", functionOid); + } + + int defArgs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes); + ReleaseSysCache(proctup); + + if (argModes == NULL) + { + /* No OUT arguments */ + return false; + } + + /* + * Passed arguments Includes IN, OUT, INOUT (in both the lists) and VARIADIC arguments, + * which means INOUT arguments are double counted. + */ + int numberOfArgs = list_length(stmt->funcexpr->args) + list_length(stmt->outargs); + int totalInoutArgs = 0; + + /* Let's count INOUT arguments from the defined number of arguments */ + for (argIndex=0; argIndex < defArgs; ++argIndex) + { + if (argModes[argIndex] == PROARGMODE_INOUT) + totalInoutArgs++; + } + + /* Remove the duplicate INOUT counting */ + numberOfArgs = numberOfArgs - totalInoutArgs; + finalArgTypes = palloc0(sizeof(Oid) * numberOfArgs); + + ListCell *inArgCell = list_head(stmt->funcexpr->args); + ListCell *outArgCell = list_head(stmt->outargs); + + for (argIndex=0; argIndex < numberOfArgs; ++argIndex) + { + switch (argModes[argIndex]) + { + case PROARGMODE_IN: + case PROARGMODE_VARIADIC: + { + Node *arg = (Node *) lfirst(inArgCell); + + if (IsA(arg, NamedArgExpr)) + namedArgList = lappend(namedArgList, ((NamedArgExpr *) arg)->name); + finalArgTypes[argIndex] = exprType(arg); + finalArgumentList = lappend(finalArgumentList, arg); + inArgCell = lnext(stmt->funcexpr->args, inArgCell); + break; + } + + case PROARGMODE_OUT: + { + Node *arg = (Node *) lfirst(outArgCell); + + if (IsA(arg, NamedArgExpr)) + namedArgList = lappend(namedArgList, ((NamedArgExpr *) arg)->name); + finalArgTypes[argIndex] = exprType(arg); + finalArgumentList = lappend(finalArgumentList, arg); + outArgCell = lnext(stmt->outargs, outArgCell); + break; + } + + case PROARGMODE_INOUT: + { + Node *arg = (Node *) lfirst(inArgCell); + + if (IsA(arg, NamedArgExpr)) + namedArgList = lappend(namedArgList, ((NamedArgExpr *) arg)->name); + finalArgTypes[argIndex] = exprType(arg); + finalArgumentList = lappend(finalArgumentList, arg); + inArgCell = lnext(stmt->funcexpr->args, inArgCell); + outArgCell = lnext(stmt->outargs, outArgCell); + break; + } + + case PROARGMODE_TABLE: + default: + { + elog(ERROR, "Unhandled procedure argument mode[%d]", argModes[argIndex]); + break; + } + } + } + + /* + * After eliminating INOUT duplicates and merging OUT arguments, we now + * have the final list of arguments. 
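A small worked example of the argument counting above (hypothetical procedure, for illustration only):

    /*
     * CREATE PROCEDURE p(IN a int, INOUT b int, OUT c int) ...
     * CALL p(1, 2, NULL);
     *
     *   list_length(stmt->funcexpr->args) = 2     -- a (IN) and b (INOUT)
     *   list_length(stmt->outargs)        = 2     -- b (INOUT) and c (OUT)
     *   totalInoutArgs                    = 1     -- b is counted twice
     *   numberOfArgs = 2 + 2 - 1          = 3     -- matches defArgs
     *
     * The merge loop then takes a and b from funcexpr->args and c from
     * outargs, advancing both cursors for the INOUT parameter b.
     */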
+ */ + if (defArgs != list_length(finalArgumentList)) + { + elog(ERROR, "Insufficient number of args passed[%d] for function[%s]", + list_length(finalArgumentList), + get_func_name(functionOid)); + } + + if (list_length(finalArgumentList) > FUNC_MAX_ARGS) + { + ereport(ERROR, + (errcode(ERRCODE_TOO_MANY_ARGUMENTS), + errmsg("too many arguments[%d] for function[%s]", + list_length(finalArgumentList), + get_func_name(functionOid)))); + } + + *mergedNamedArgList = namedArgList; + *mergedNamedArgTypes = finalArgTypes; + *mergedArgumentList = finalArgumentList; + *totalArguments = numberOfArgs; + + return true; +} + +/* + * pg_get_rule_expr deparses an expression and returns the result as a string. + */ +char * +pg_get_rule_expr(Node *expression) +{ + bool showImplicitCasts = true; + deparse_context context; + StringInfo buffer = makeStringInfo(); + + /* + * Set search_path to NIL so that all objects outside of pg_catalog will be + * schema-prefixed. pg_catalog will be added automatically when we call + * PushEmptySearchPath(). + */ + int saveNestLevel = PushEmptySearchPath(); + + context.buf = buffer; + context.namespaces = NIL; + context.windowClause = NIL; + context.windowTList = NIL; + context.varprefix = false; + context.prettyFlags = 0; + context.wrapColumn = WRAP_COLUMN_DEFAULT; + context.indentLevel = 0; + context.special_exprkind = EXPR_KIND_NONE; + context.distrelid = InvalidOid; + context.shardid = INVALID_SHARD_ID; + + get_rule_expr(expression, &context, showImplicitCasts); + + /* revert back to original search_path */ + PopEmptySearchPath(saveNestLevel); + + return buffer->data; +} + +/* + * set_rtable_names: select RTE aliases to be used in printing a query + * + * We fill in dpns->rtable_names with a list of names that is one-for-one with + * the already-filled dpns->rtable list. Each RTE name is unique among those + * in the new namespace plus any ancestor namespaces listed in + * parent_namespaces. + * + * If rels_used isn't NULL, only RTE indexes listed in it are given aliases. + * + * Note that this function is only concerned with relation names, not column + * names. + */ +static void +set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, + Bitmapset *rels_used) +{ + HASHCTL hash_ctl; + HTAB *names_hash; + NameHashEntry *hentry; + bool found; + int rtindex; + ListCell *lc; + + dpns->rtable_names = NIL; + /* nothing more to do if empty rtable */ + if (dpns->rtable == NIL) + return; + + /* + * We use a hash table to hold known names, so that this process is O(N) + * not O(N^2) for N names. + */ + hash_ctl.keysize = NAMEDATALEN; + hash_ctl.entrysize = sizeof(NameHashEntry); + hash_ctl.hcxt = CurrentMemoryContext; + names_hash = hash_create("set_rtable_names names", + list_length(dpns->rtable), + &hash_ctl, + HASH_ELEM | HASH_STRINGS | HASH_CONTEXT); + + /* Preload the hash table with names appearing in parent_namespaces */ + foreach(lc, parent_namespaces) + { + deparse_namespace *olddpns = (deparse_namespace *) lfirst(lc); + ListCell *lc2; + + foreach(lc2, olddpns->rtable_names) + { + char *oldname = (char *) lfirst(lc2); + + if (oldname == NULL) + continue; + hentry = (NameHashEntry *) hash_search(names_hash, + oldname, + HASH_ENTER, + &found); + /* we do not complain about duplicate names in parent namespaces */ + hentry->counter = 0; + } + } + + /* Now we can scan the rtable */ + rtindex = 1; + foreach(lc, dpns->rtable) + { + RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); + char *refname; + + /* Just in case this takes an unreasonable amount of time ... 
*/ + CHECK_FOR_INTERRUPTS(); + + if (rels_used && !bms_is_member(rtindex, rels_used)) + { + /* Ignore unreferenced RTE */ + refname = NULL; + } + else if (rte->alias) + { + /* If RTE has a user-defined alias, prefer that */ + refname = rte->alias->aliasname; + } + else if (rte->rtekind == RTE_RELATION) + { + /* Use the current actual name of the relation */ + refname = get_rel_name(rte->relid); + } + else if (rte->rtekind == RTE_JOIN) + { + /* Unnamed join has no refname */ + refname = NULL; + } + else + { + /* Otherwise use whatever the parser assigned */ + refname = rte->eref->aliasname; + } + + /* + * If the selected name isn't unique, append digits to make it so, and + * make a new hash entry for it once we've got a unique name. For a + * very long input name, we might have to truncate to stay within + * NAMEDATALEN. + */ + if (refname) + { + hentry = (NameHashEntry *) hash_search(names_hash, + refname, + HASH_ENTER, + &found); + if (found) + { + /* Name already in use, must choose a new one */ + int refnamelen = strlen(refname); + char *modname = (char *) palloc(refnamelen + 16); + NameHashEntry *hentry2; + + do + { + hentry->counter++; + for (;;) + { + memcpy(modname, refname, refnamelen); + sprintf(modname + refnamelen, "_%d", hentry->counter); + if (strlen(modname) < NAMEDATALEN) + break; + /* drop chars from refname to keep all the digits */ + refnamelen = pg_mbcliplen(refname, refnamelen, + refnamelen - 1); + } + hentry2 = (NameHashEntry *) hash_search(names_hash, + modname, + HASH_ENTER, + &found); + } while (found); + hentry2->counter = 0; /* init new hash entry */ + refname = modname; + } + else + { + /* Name not previously used, need only initialize hentry */ + hentry->counter = 0; + } + } + + dpns->rtable_names = lappend(dpns->rtable_names, refname); + rtindex++; + } + + hash_destroy(names_hash); +} + +/* + * set_deparse_for_query: set up deparse_namespace for deparsing a Query tree + * + * For convenience, this is defined to initialize the deparse_namespace struct + * from scratch. + */ +static void +set_deparse_for_query(deparse_namespace *dpns, Query *query, + List *parent_namespaces) +{ + ListCell *lc; + ListCell *lc2; + + /* Initialize *dpns and fill rtable/ctes links */ + memset(dpns, 0, sizeof(deparse_namespace)); + dpns->rtable = query->rtable; + dpns->subplans = NIL; + dpns->ctes = query->cteList; + dpns->appendrels = NULL; + + /* Assign a unique relation alias to each RTE */ + set_rtable_names(dpns, parent_namespaces, NULL); + + /* Initialize dpns->rtable_columns to contain zeroed structs */ + dpns->rtable_columns = NIL; + while (list_length(dpns->rtable_columns) < list_length(dpns->rtable)) + dpns->rtable_columns = lappend(dpns->rtable_columns, + palloc0(sizeof(deparse_columns))); + + /* If it's a utility query, it won't have a jointree */ + if (query->jointree) + { + /* Detect whether global uniqueness of USING names is needed */ + dpns->unique_using = + has_dangerous_join_using(dpns, (Node *) query->jointree); + + /* + * Select names for columns merged by USING, via a recursive pass over + * the query jointree. + */ + set_using_names(dpns, (Node *) query->jointree, NIL); + } + + /* + * Now assign remaining column aliases for each RTE. We do this in a + * linear scan of the rtable, so as to process RTEs whether or not they + * are in the jointree (we mustn't miss NEW.*, INSERT target relations, + * etc). 
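A concrete illustration of the alias uniquification in set_rtable_names() above (hypothetical query, illustration only): because names chosen by ancestor query levels are preloaded into names_hash, a subquery over the same relation gets a numbered alias rather than an ambiguous repeat.

    /*
     * SELECT * FROM orders WHERE EXISTS (SELECT 1 FROM orders);
     *
     * outer level: rtable_names = ["orders"]
     * inner level: "orders" is already in names_hash (preloaded from the
     *              parent namespace), so the subquery's RTE gets the name
     *              "orders_1" and is deparsed roughly as
     *              "... EXISTS (SELECT 1 FROM orders orders_1)".
     */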
JOIN RTEs must be processed after their children, but this is + * okay because they appear later in the rtable list than their children + * (cf Asserts in identify_join_columns()). + */ + forboth(lc, dpns->rtable, lc2, dpns->rtable_columns) + { + RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); + deparse_columns *colinfo = (deparse_columns *) lfirst(lc2); + + if (rte->rtekind == RTE_JOIN) + set_join_column_names(dpns, rte, colinfo); + else + set_relation_column_names(dpns, rte, colinfo); + } +} + +/* + * has_dangerous_join_using: search jointree for unnamed JOIN USING + * + * Merged columns of a JOIN USING may act differently from either of the input + * columns, either because they are merged with COALESCE (in a FULL JOIN) or + * because an implicit coercion of the underlying input column is required. + * In such a case the column must be referenced as a column of the JOIN not as + * a column of either input. And this is problematic if the join is unnamed + * (alias-less): we cannot qualify the column's name with an RTE name, since + * there is none. (Forcibly assigning an alias to the join is not a solution, + * since that will prevent legal references to tables below the join.) + * To ensure that every column in the query is unambiguously referenceable, + * we must assign such merged columns names that are globally unique across + * the whole query, aliasing other columns out of the way as necessary. + * + * Because the ensuing re-aliasing is fairly damaging to the readability of + * the query, we don't do this unless we have to. So, we must pre-scan + * the join tree to see if we have to, before starting set_using_names(). + */ +static bool +has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode) +{ + if (IsA(jtnode, RangeTblRef)) + { + /* nothing to do here */ + } + else if (IsA(jtnode, FromExpr)) + { + FromExpr *f = (FromExpr *) jtnode; + ListCell *lc; + + foreach(lc, f->fromlist) + { + if (has_dangerous_join_using(dpns, (Node *) lfirst(lc))) + return true; + } + } + else if (IsA(jtnode, JoinExpr)) + { + JoinExpr *j = (JoinExpr *) jtnode; + + /* Is it an unnamed JOIN with USING? */ + if (j->alias == NULL && j->usingClause) + { + /* + * Yes, so check each join alias var to see if any of them are not + * simple references to underlying columns. If so, we have a + * dangerous situation and must pick unique aliases. + */ + RangeTblEntry *jrte = rt_fetch(j->rtindex, dpns->rtable); + + /* We need only examine the merged columns */ + for (int i = 0; i < jrte->joinmergedcols; i++) + { + Node *aliasvar = list_nth(jrte->joinaliasvars, i); + + if (!IsA(aliasvar, Var)) + return true; + } + } + + /* Nope, but inspect children */ + if (has_dangerous_join_using(dpns, j->larg)) + return true; + if (has_dangerous_join_using(dpns, j->rarg)) + return true; + } + else + elog(ERROR, "unrecognized node type: %d", + (int) nodeTag(jtnode)); + return false; +} + +/* + * set_using_names: select column aliases to be used for merged USING columns + * + * We do this during a recursive descent of the query jointree. + * dpns->unique_using must already be set to determine the global strategy. + * + * Column alias info is saved in the dpns->rtable_columns list, which is + * assumed to be filled with pre-zeroed deparse_columns structs. + * + * parentUsing is a list of all USING aliases assigned in parent joins of + * the current jointree node. (The passed-in list must not be modified.) 
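To make the "dangerous" case above concrete (hypothetical query, illustration only): a column merged by FULL JOIN ... USING is really a COALESCE of the two inputs, so it can only be referenced as a column of the join itself, and an unnamed join offers no alias to qualify it with.

    /*
     * SELECT id FROM (a FULL JOIN b USING (id)) JOIN c ON (true);
     *
     * The join alias var for "id" is COALESCE(a.id, b.id), not a plain Var,
     * so has_dangerous_join_using() returns true.  set_using_names() then
     * runs with dpns->unique_using set, picks a query-wide unique name for
     * "id", and aliases any other "id" columns in the query out of its way.
     */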
+ */ +static void +set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing) +{ + if (IsA(jtnode, RangeTblRef)) + { + /* nothing to do now */ + } + else if (IsA(jtnode, FromExpr)) + { + FromExpr *f = (FromExpr *) jtnode; + ListCell *lc; + + foreach(lc, f->fromlist) + set_using_names(dpns, (Node *) lfirst(lc), parentUsing); + } + else if (IsA(jtnode, JoinExpr)) + { + JoinExpr *j = (JoinExpr *) jtnode; + RangeTblEntry *rte = rt_fetch(j->rtindex, dpns->rtable); + deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); + int *leftattnos; + int *rightattnos; + deparse_columns *leftcolinfo; + deparse_columns *rightcolinfo; + int i; + ListCell *lc; + + /* Get info about the shape of the join */ + identify_join_columns(j, rte, colinfo); + leftattnos = colinfo->leftattnos; + rightattnos = colinfo->rightattnos; + + /* Look up the not-yet-filled-in child deparse_columns structs */ + leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); + rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); + + /* + * If this join is unnamed, then we cannot substitute new aliases at + * this level, so any name requirements pushed down to here must be + * pushed down again to the children. + */ + if (rte->alias == NULL) + { + for (i = 0; i < colinfo->num_cols; i++) + { + char *colname = colinfo->colnames[i]; + + if (colname == NULL) + continue; + + /* Push down to left column, unless it's a system column */ + if (leftattnos[i] > 0) + { + expand_colnames_array_to(leftcolinfo, leftattnos[i]); + leftcolinfo->colnames[leftattnos[i] - 1] = colname; + } + + /* Same on the righthand side */ + if (rightattnos[i] > 0) + { + expand_colnames_array_to(rightcolinfo, rightattnos[i]); + rightcolinfo->colnames[rightattnos[i] - 1] = colname; + } + } + } + + /* + * If there's a USING clause, select the USING column names and push + * those names down to the children. We have two strategies: + * + * If dpns->unique_using is true, we force all USING names to be + * unique across the whole query level. In principle we'd only need + * the names of dangerous USING columns to be globally unique, but to + * safely assign all USING names in a single pass, we have to enforce + * the same uniqueness rule for all of them. However, if a USING + * column's name has been pushed down from the parent, we should use + * it as-is rather than making a uniqueness adjustment. This is + * necessary when we're at an unnamed join, and it creates no risk of + * ambiguity. Also, if there's a user-written output alias for a + * merged column, we prefer to use that rather than the input name; + * this simplifies the logic and seems likely to lead to less aliasing + * overall. + * + * If dpns->unique_using is false, we only need USING names to be + * unique within their own join RTE. We still need to honor + * pushed-down names, though. + * + * Though significantly different in results, these two strategies are + * implemented by the same code, with only the difference of whether + * to put assigned names into dpns->using_names. 
+ */ + if (j->usingClause) + { + /* Copy the input parentUsing list so we don't modify it */ + parentUsing = list_copy(parentUsing); + + /* USING names must correspond to the first join output columns */ + expand_colnames_array_to(colinfo, list_length(j->usingClause)); + i = 0; + foreach(lc, j->usingClause) + { + char *colname = strVal(lfirst(lc)); + + /* Assert it's a merged column */ + Assert(leftattnos[i] != 0 && rightattnos[i] != 0); + + /* Adopt passed-down name if any, else select unique name */ + if (colinfo->colnames[i] != NULL) + colname = colinfo->colnames[i]; + else + { + /* Prefer user-written output alias if any */ + if (rte->alias && i < list_length(rte->alias->colnames)) + colname = strVal(list_nth(rte->alias->colnames, i)); + /* Make it appropriately unique */ + colname = make_colname_unique(colname, dpns, colinfo); + if (dpns->unique_using) + dpns->using_names = lappend(dpns->using_names, + colname); + /* Save it as output column name, too */ + colinfo->colnames[i] = colname; + } + + /* Remember selected names for use later */ + colinfo->usingNames = lappend(colinfo->usingNames, colname); + parentUsing = lappend(parentUsing, colname); + + /* Push down to left column, unless it's a system column */ + if (leftattnos[i] > 0) + { + expand_colnames_array_to(leftcolinfo, leftattnos[i]); + leftcolinfo->colnames[leftattnos[i] - 1] = colname; + } + + /* Same on the righthand side */ + if (rightattnos[i] > 0) + { + expand_colnames_array_to(rightcolinfo, rightattnos[i]); + rightcolinfo->colnames[rightattnos[i] - 1] = colname; + } + + i++; + } + } + + /* Mark child deparse_columns structs with correct parentUsing info */ + leftcolinfo->parentUsing = parentUsing; + rightcolinfo->parentUsing = parentUsing; + + /* Now recursively assign USING column names in children */ + set_using_names(dpns, j->larg, parentUsing); + set_using_names(dpns, j->rarg, parentUsing); + } + else + elog(ERROR, "unrecognized node type: %d", + (int) nodeTag(jtnode)); +} + +/* + * set_relation_column_names: select column aliases for a non-join RTE + * + * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. + * If any colnames entries are already filled in, those override local + * choices. + */ +static void +set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, + deparse_columns *colinfo) +{ + int ncolumns; + char **real_colnames; + bool changed_any; + bool has_anonymous; + int noldcolumns; + int i; + int j; + + /* + * Construct an array of the current "real" column names of the RTE. + * real_colnames[] will be indexed by physical column number, with NULL + * entries for dropped columns. 
+ */ + if (rte->rtekind == RTE_RELATION || + GetRangeTblKind(rte) == CITUS_RTE_SHARD) + { + /* Relation --- look to the system catalogs for up-to-date info */ + Relation rel; + TupleDesc tupdesc; + + rel = relation_open(rte->relid, AccessShareLock); + tupdesc = RelationGetDescr(rel); + + ncolumns = tupdesc->natts; + real_colnames = (char **) palloc(ncolumns * sizeof(char *)); + + for (i = 0; i < ncolumns; i++) + { + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); + + if (attr->attisdropped) + real_colnames[i] = NULL; + else + real_colnames[i] = pstrdup(NameStr(attr->attname)); + } + relation_close(rel, AccessShareLock); + } + else + { + /* Otherwise get the column names from eref or expandRTE() */ + List *colnames; + ListCell *lc; + + /* + * Functions returning composites have the annoying property that some + * of the composite type's columns might have been dropped since the + * query was parsed. If possible, use expandRTE() to handle that + * case, since it has the tedious logic needed to find out about + * dropped columns. However, if we're explaining a plan, then we + * don't have rte->functions because the planner thinks that won't be + * needed later, and that breaks expandRTE(). So in that case we have + * to rely on rte->eref, which may lead us to report a dropped + * column's old name; that seems close enough for EXPLAIN's purposes. + * + * For non-RELATION, non-FUNCTION RTEs, we can just look at rte->eref, + * which should be sufficiently up-to-date: no other RTE types can + * have columns get dropped from under them after parsing. + */ + if (rte->rtekind == RTE_FUNCTION && rte->functions != NIL) + { + /* Since we're not creating Vars, rtindex etc. don't matter */ + expandRTE(rte, 1, 0, -1, true /* include dropped */ , + &colnames, NULL); + } + else + colnames = rte->eref->colnames; + + ncolumns = list_length(colnames); + real_colnames = (char **) palloc(ncolumns * sizeof(char *)); + + i = 0; + foreach(lc, colnames) + { + /* + * If the column name we find here is an empty string, then it's a + * dropped column, so change to NULL. + */ + char *cname = strVal(lfirst(lc)); + + if (cname[0] == '\0') + cname = NULL; + real_colnames[i] = cname; + i++; + } + } + + /* + * Ensure colinfo->colnames has a slot for each column. (It could be long + * enough already, if we pushed down a name for the last column.) Note: + * it's possible that there are now more columns than there were when the + * query was parsed, ie colnames could be longer than rte->eref->colnames. + * We must assign unique aliases to the new columns too, else there could + * be unresolved conflicts when the view/rule is reloaded. + */ + expand_colnames_array_to(colinfo, ncolumns); + Assert(colinfo->num_cols == ncolumns); + + /* + * Make sufficiently large new_colnames and is_new_col arrays, too. + * + * Note: because we leave colinfo->num_new_cols zero until after the loop, + * colname_is_unique will not consult that array, which is fine because it + * would only be duplicate effort. + */ + colinfo->new_colnames = (char **) palloc(ncolumns * sizeof(char *)); + colinfo->is_new_col = (bool *) palloc(ncolumns * sizeof(bool)); + + /* + * Scan the columns, select a unique alias for each one, and store it in + * colinfo->colnames and colinfo->new_colnames. The former array has NULL + * entries for dropped columns, the latter omits them. Also mark + * new_colnames entries as to whether they are new since parse time; this + * is the case for entries beyond the length of rte->eref->colnames. 
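An illustration of the expandRTE() branch above (hypothetical objects, illustration only): keeping dropped columns in the expansion preserves attribute-number alignment for the columns that remain.

    /*
     * CREATE TYPE ct AS (a int, b int, c int);
     * CREATE FUNCTION get_rows() RETURNS SETOF ct ...;
     * CREATE VIEW v AS SELECT a, c FROM get_rows();
     * ALTER TYPE ct DROP ATTRIBUTE b;
     *
     * When v is deparsed, expandRTE(rte, 1, 0, -1, true, &colnames, NULL)
     * still reports three columns, with an empty name in position 2; the
     * loop above turns that into a NULL entry, so c keeps varattno 3 and
     * real_colnames[2] still refers to the right column.
     */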
+ */ + noldcolumns = list_length(rte->eref->colnames); + changed_any = false; + has_anonymous = false; + j = 0; + for (i = 0; i < ncolumns; i++) + { + char *real_colname = real_colnames[i]; + char *colname = colinfo->colnames[i]; + + /* Skip dropped columns */ + if (real_colname == NULL) + { + Assert(colname == NULL); /* colnames[i] is already NULL */ + continue; + } + + /* If alias already assigned, that's what to use */ + if (colname == NULL) + { + /* If user wrote an alias, prefer that over real column name */ + if (rte->alias && i < list_length(rte->alias->colnames)) + colname = strVal(list_nth(rte->alias->colnames, i)); + else + colname = real_colname; + + /* Unique-ify and insert into colinfo */ + colname = make_colname_unique(colname, dpns, colinfo); + + colinfo->colnames[i] = colname; + } + + /* Put names of non-dropped columns in new_colnames[] too */ + colinfo->new_colnames[j] = colname; + /* And mark them as new or not */ + colinfo->is_new_col[j] = (i >= noldcolumns); + j++; + + /* Remember if any assigned aliases differ from "real" name */ + if (!changed_any && strcmp(colname, real_colname) != 0) + changed_any = true; + + /* + * Remember if there is a reference to an anonymous column as named by + * char * FigureColname(Node *node) + */ + if (!has_anonymous && strcmp(real_colname, "?column?") == 0) + has_anonymous = true; + } + + /* + * Set correct length for new_colnames[] array. (Note: if columns have + * been added, colinfo->num_cols includes them, which is not really quite + * right but is harmless, since any new columns must be at the end where + * they won't affect varattnos of pre-existing columns.) + */ + colinfo->num_new_cols = j; + + /* + * For a relation RTE, we need only print the alias column names if any + * are different from the underlying "real" names. For a function RTE, + * always emit a complete column alias list; this is to protect against + * possible instability of the default column names (eg, from altering + * parameter names). For tablefunc RTEs, we never print aliases, because + * the column names are part of the clause itself. For other RTE types, + * print if we changed anything OR if there were user-written column + * aliases (since the latter would be part of the underlying "reality"). + */ + if (rte->rtekind == RTE_RELATION) + colinfo->printaliases = changed_any; + else if (rte->rtekind == RTE_FUNCTION) + colinfo->printaliases = true; + else if (rte->rtekind == RTE_TABLEFUNC) + colinfo->printaliases = false; + else if (rte->alias && rte->alias->colnames != NIL) + colinfo->printaliases = true; + else + colinfo->printaliases = changed_any || has_anonymous; +} + +/* + * set_join_column_names: select column aliases for a join RTE + * + * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. + * If any colnames entries are already filled in, those override local + * choices. Also, names for USING columns were already chosen by + * set_using_names(). We further expect that column alias selection has been + * completed for both input RTEs. 
+ */ +static void +set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, + deparse_columns *colinfo) +{ + deparse_columns *leftcolinfo; + deparse_columns *rightcolinfo; + bool changed_any; + int noldcolumns; + int nnewcolumns; + Bitmapset *leftmerged = NULL; + Bitmapset *rightmerged = NULL; + int i; + int j; + int ic; + int jc; + + /* Look up the previously-filled-in child deparse_columns structs */ + leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); + rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); + + /* + * Ensure colinfo->colnames has a slot for each column. (It could be long + * enough already, if we pushed down a name for the last column.) Note: + * it's possible that one or both inputs now have more columns than there + * were when the query was parsed, but we'll deal with that below. We + * only need entries in colnames for pre-existing columns. + */ + noldcolumns = list_length(rte->eref->colnames); + expand_colnames_array_to(colinfo, noldcolumns); + Assert(colinfo->num_cols == noldcolumns); + + /* + * Scan the join output columns, select an alias for each one, and store + * it in colinfo->colnames. If there are USING columns, set_using_names() + * already selected their names, so we can start the loop at the first + * non-merged column. + */ + changed_any = false; + for (i = list_length(colinfo->usingNames); i < noldcolumns; i++) + { + char *colname = colinfo->colnames[i]; + char *real_colname; + + /* Join column must refer to at least one input column */ + Assert(colinfo->leftattnos[i] != 0 || colinfo->rightattnos[i] != 0); + + /* Get the child column name */ + if (colinfo->leftattnos[i] > 0) + real_colname = leftcolinfo->colnames[colinfo->leftattnos[i] - 1]; + else if (colinfo->rightattnos[i] > 0) + real_colname = rightcolinfo->colnames[colinfo->rightattnos[i] - 1]; + else + { + /* We're joining system columns --- use eref name */ + real_colname = strVal(list_nth(rte->eref->colnames, i)); + } + /* If child col has been dropped, no need to assign a join colname */ + if (real_colname == NULL) + { + colinfo->colnames[i] = NULL; + continue; + } + + /* In an unnamed join, just report child column names as-is */ + if (rte->alias == NULL) + { + colinfo->colnames[i] = real_colname; + continue; + } + + /* If alias already assigned, that's what to use */ + if (colname == NULL) + { + /* If user wrote an alias, prefer that over real column name */ + if (rte->alias && i < list_length(rte->alias->colnames)) + colname = strVal(list_nth(rte->alias->colnames, i)); + else + colname = real_colname; + + /* Unique-ify and insert into colinfo */ + colname = make_colname_unique(colname, dpns, colinfo); + + colinfo->colnames[i] = colname; + } + + /* Remember if any assigned aliases differ from "real" name */ + if (!changed_any && strcmp(colname, real_colname) != 0) + changed_any = true; + } + + /* + * Calculate number of columns the join would have if it were re-parsed + * now, and create storage for the new_colnames and is_new_col arrays. + * + * Note: colname_is_unique will be consulting new_colnames[] during the + * loops below, so its not-yet-filled entries must be zeroes. 
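The ordering that the loops below must reproduce is easier to see with a small example (hypothetical tables, illustration only): merged USING columns come first, then non-merged columns of the left input in attnum order, then non-merged columns of the right input, and columns added after parse time are slotted where a re-parse would put them rather than at the end.

    /*
     * t1(a, id) JOIN t2(id, b) USING (id)
     *
     * At parse time the join's columns are (id, a, b): the merged USING
     * column, then t1's non-merged a, then t2's non-merged b.
     *
     * If a column d is later added to t1, new_colnames becomes
     * (id, a, d, b): d is inserted after a, where the left input now puts
     * it, and is_new_col[] marks it as new.
     */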
+ */ + nnewcolumns = leftcolinfo->num_new_cols + rightcolinfo->num_new_cols - + list_length(colinfo->usingNames); + colinfo->num_new_cols = nnewcolumns; + colinfo->new_colnames = (char **) palloc0(nnewcolumns * sizeof(char *)); + colinfo->is_new_col = (bool *) palloc0(nnewcolumns * sizeof(bool)); + + /* + * Generating the new_colnames array is a bit tricky since any new columns + * added since parse time must be inserted in the right places. This code + * must match the parser, which will order a join's columns as merged + * columns first (in USING-clause order), then non-merged columns from the + * left input (in attnum order), then non-merged columns from the right + * input (ditto). If one of the inputs is itself a join, its columns will + * be ordered according to the same rule, which means newly-added columns + * might not be at the end. We can figure out what's what by consulting + * the leftattnos and rightattnos arrays plus the input is_new_col arrays. + * + * In these loops, i indexes leftattnos/rightattnos (so it's join varattno + * less one), j indexes new_colnames/is_new_col, and ic/jc have similar + * meanings for the current child RTE. + */ + + /* Handle merged columns; they are first and can't be new */ + i = j = 0; + while (i < noldcolumns && + colinfo->leftattnos[i] != 0 && + colinfo->rightattnos[i] != 0) + { + /* column name is already determined and known unique */ + colinfo->new_colnames[j] = colinfo->colnames[i]; + colinfo->is_new_col[j] = false; + + /* build bitmapsets of child attnums of merged columns */ + if (colinfo->leftattnos[i] > 0) + leftmerged = bms_add_member(leftmerged, colinfo->leftattnos[i]); + if (colinfo->rightattnos[i] > 0) + rightmerged = bms_add_member(rightmerged, colinfo->rightattnos[i]); + + i++, j++; + } + + /* Handle non-merged left-child columns */ + ic = 0; + for (jc = 0; jc < leftcolinfo->num_new_cols; jc++) + { + char *child_colname = leftcolinfo->new_colnames[jc]; + + if (!leftcolinfo->is_new_col[jc]) + { + /* Advance ic to next non-dropped old column of left child */ + while (ic < leftcolinfo->num_cols && + leftcolinfo->colnames[ic] == NULL) + ic++; + Assert(ic < leftcolinfo->num_cols); + ic++; + /* If it is a merged column, we already processed it */ + if (bms_is_member(ic, leftmerged)) + continue; + /* Else, advance i to the corresponding existing join column */ + while (i < colinfo->num_cols && + colinfo->colnames[i] == NULL) + i++; + Assert(i < colinfo->num_cols); + Assert(ic == colinfo->leftattnos[i]); + /* Use the already-assigned name of this column */ + colinfo->new_colnames[j] = colinfo->colnames[i]; + i++; + } + else + { + /* + * Unique-ify the new child column name and assign, unless we're + * in an unnamed join, in which case just copy + */ + if (rte->alias != NULL) + { + colinfo->new_colnames[j] = + make_colname_unique(child_colname, dpns, colinfo); + if (!changed_any && + strcmp(colinfo->new_colnames[j], child_colname) != 0) + changed_any = true; + } + else + colinfo->new_colnames[j] = child_colname; + } + + colinfo->is_new_col[j] = leftcolinfo->is_new_col[jc]; + j++; + } + + /* Handle non-merged right-child columns in exactly the same way */ + ic = 0; + for (jc = 0; jc < rightcolinfo->num_new_cols; jc++) + { + char *child_colname = rightcolinfo->new_colnames[jc]; + + if (!rightcolinfo->is_new_col[jc]) + { + /* Advance ic to next non-dropped old column of right child */ + while (ic < rightcolinfo->num_cols && + rightcolinfo->colnames[ic] == NULL) + ic++; + Assert(ic < rightcolinfo->num_cols); + ic++; + /* If it is a merged 
column, we already processed it */ + if (bms_is_member(ic, rightmerged)) + continue; + /* Else, advance i to the corresponding existing join column */ + while (i < colinfo->num_cols && + colinfo->colnames[i] == NULL) + i++; + Assert(i < colinfo->num_cols); + Assert(ic == colinfo->rightattnos[i]); + /* Use the already-assigned name of this column */ + colinfo->new_colnames[j] = colinfo->colnames[i]; + i++; + } + else + { + /* + * Unique-ify the new child column name and assign, unless we're + * in an unnamed join, in which case just copy + */ + if (rte->alias != NULL) + { + colinfo->new_colnames[j] = + make_colname_unique(child_colname, dpns, colinfo); + if (!changed_any && + strcmp(colinfo->new_colnames[j], child_colname) != 0) + changed_any = true; + } + else + colinfo->new_colnames[j] = child_colname; + } + + colinfo->is_new_col[j] = rightcolinfo->is_new_col[jc]; + j++; + } + + /* Assert we processed the right number of columns */ +#ifdef USE_ASSERT_CHECKING + for (int col_index = 0; col_index < colinfo->num_cols; col_index++) + { + /* + * In the above processing-loops, "i" advances only if + * the column is not new, check if this is a new column. + */ + if (colinfo->is_new_col[col_index]) + i++; + } + Assert(i == colinfo->num_cols); + Assert(j == nnewcolumns); +#endif + + /* + * For a named join, print column aliases if we changed any from the child + * names. Unnamed joins cannot print aliases. + */ + if (rte->alias != NULL) + colinfo->printaliases = changed_any; + else + colinfo->printaliases = false; +} + +/* + * colname_is_unique: is colname distinct from already-chosen column names? + * + * dpns is query-wide info, colinfo is for the column's RTE + */ +static bool +colname_is_unique(const char *colname, deparse_namespace *dpns, + deparse_columns *colinfo) +{ + int i; + ListCell *lc; + + /* Check against already-assigned column aliases within RTE */ + for (i = 0; i < colinfo->num_cols; i++) + { + char *oldname = colinfo->colnames[i]; + + if (oldname && strcmp(oldname, colname) == 0) + return false; + } + + /* + * If we're building a new_colnames array, check that too (this will be + * partially but not completely redundant with the previous checks) + */ + for (i = 0; i < colinfo->num_new_cols; i++) + { + char *oldname = colinfo->new_colnames[i]; + + if (oldname && strcmp(oldname, colname) == 0) + return false; + } + + /* Also check against USING-column names that must be globally unique */ + foreach(lc, dpns->using_names) + { + char *oldname = (char *) lfirst(lc); + + if (strcmp(oldname, colname) == 0) + return false; + } + + /* Also check against names already assigned for parent-join USING cols */ + foreach(lc, colinfo->parentUsing) + { + char *oldname = (char *) lfirst(lc); + + if (strcmp(oldname, colname) == 0) + return false; + } + + return true; +} + +/* + * make_colname_unique: modify colname if necessary to make it unique + * + * dpns is query-wide info, colinfo is for the column's RTE + */ +static char * +make_colname_unique(char *colname, deparse_namespace *dpns, + deparse_columns *colinfo) +{ + /* + * If the selected name isn't unique, append digits to make it so. For a + * very long input name, we might have to truncate to stay within + * NAMEDATALEN. 
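+ *
+ * For example, if "id" is already taken, the loop below tries "id_1",
+ * then "id_2", and so on; the pg_mbcliplen() call only kicks in when the
+ * name plus its digit suffix would reach NAMEDATALEN.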
+ */ + if (!colname_is_unique(colname, dpns, colinfo)) + { + int colnamelen = strlen(colname); + char *modname = (char *) palloc(colnamelen + 16); + int i = 0; + + do + { + i++; + for (;;) + { + memcpy(modname, colname, colnamelen); + sprintf(modname + colnamelen, "_%d", i); + if (strlen(modname) < NAMEDATALEN) + break; + /* drop chars from colname to keep all the digits */ + colnamelen = pg_mbcliplen(colname, colnamelen, + colnamelen - 1); + } + } while (!colname_is_unique(modname, dpns, colinfo)); + colname = modname; + } + return colname; +} + +/* + * expand_colnames_array_to: make colinfo->colnames at least n items long + * + * Any added array entries are initialized to zero. + */ +static void +expand_colnames_array_to(deparse_columns *colinfo, int n) +{ + if (n > colinfo->num_cols) + { + if (colinfo->colnames == NULL) + colinfo->colnames = palloc0_array(char *, n); + else + { + colinfo->colnames = repalloc0_array(colinfo->colnames, char *, colinfo->num_cols, n); + } + colinfo->num_cols = n; + } +} + +/* + * identify_join_columns: figure out where columns of a join come from + * + * Fills the join-specific fields of the colinfo struct, except for + * usingNames which is filled later. + */ +static void +identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, + deparse_columns *colinfo) +{ + int numjoincols; + int jcolno; + int rcolno; + ListCell *lc; + + /* Extract left/right child RT indexes */ + if (IsA(j->larg, RangeTblRef)) + colinfo->leftrti = ((RangeTblRef *) j->larg)->rtindex; + else if (IsA(j->larg, JoinExpr)) + colinfo->leftrti = ((JoinExpr *) j->larg)->rtindex; + else + elog(ERROR, "unrecognized node type in jointree: %d", + (int) nodeTag(j->larg)); + if (IsA(j->rarg, RangeTblRef)) + colinfo->rightrti = ((RangeTblRef *) j->rarg)->rtindex; + else if (IsA(j->rarg, JoinExpr)) + colinfo->rightrti = ((JoinExpr *) j->rarg)->rtindex; + else + elog(ERROR, "unrecognized node type in jointree: %d", + (int) nodeTag(j->rarg)); + + /* Assert children will be processed earlier than join in second pass */ + Assert(colinfo->leftrti < j->rtindex); + Assert(colinfo->rightrti < j->rtindex); + + /* Initialize result arrays with zeroes */ + numjoincols = list_length(jrte->joinaliasvars); + Assert(numjoincols == list_length(jrte->eref->colnames)); + colinfo->leftattnos = (int *) palloc0(numjoincols * sizeof(int)); + colinfo->rightattnos = (int *) palloc0(numjoincols * sizeof(int)); + + /* + * Deconstruct RTE's joinleftcols/joinrightcols into desired format. + * Recall that the column(s) merged due to USING are the first column(s) + * of the join output. We need not do anything special while scanning + * joinleftcols, but while scanning joinrightcols we must distinguish + * merged from unmerged columns. + */ + jcolno = 0; + foreach(lc, jrte->joinleftcols) + { + int leftattno = lfirst_int(lc); + + colinfo->leftattnos[jcolno++] = leftattno; + } + rcolno = 0; + foreach(lc, jrte->joinrightcols) + { + int rightattno = lfirst_int(lc); + + if (rcolno < jrte->joinmergedcols) /* merged column? */ + colinfo->rightattnos[rcolno] = rightattno; + else + colinfo->rightattnos[jcolno++] = rightattno; + rcolno++; + } + Assert(jcolno == numjoincols); +} + +/* + * get_rtable_name: convenience function to get a previously assigned RTE alias + * + * The RTE must belong to the topmost namespace level in "context". 
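+ *
+ * In this file it is used, for example, to print the target of
+ * FOR [KEY] UPDATE/SHARE ... OF and the alias of the result relation in
+ * the shard-specific UPDATE/DELETE/MERGE branches.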
+ */ +static char * +get_rtable_name(int rtindex, deparse_context *context) +{ + deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); + + Assert(rtindex > 0 && rtindex <= list_length(dpns->rtable_names)); + return (char *) list_nth(dpns->rtable_names, rtindex - 1); +} + +/* + * set_deparse_plan: set up deparse_namespace to parse subexpressions + * of a given Plan node + * + * This sets the plan, outer_planstate, inner_planstate, outer_tlist, + * inner_tlist, and index_tlist fields. Caller is responsible for adjusting + * the ancestors list if necessary. Note that the rtable and ctes fields do + * not need to change when shifting attention to different plan nodes in a + * single plan tree. + */ +static void +set_deparse_plan(deparse_namespace *dpns, Plan *plan) +{ + dpns->plan = plan; + + /* + * We special-case Append and MergeAppend to pretend that the first child + * plan is the OUTER referent; we have to interpret OUTER Vars in their + * tlists according to one of the children, and the first one is the most + * natural choice. + */ + if (IsA(plan, Append)) + dpns->outer_plan = linitial(((Append *) plan)->appendplans); + else if (IsA(plan, MergeAppend)) + dpns->outer_plan = linitial(((MergeAppend *) plan)->mergeplans); + else + dpns->outer_plan = outerPlan(plan); + + if (dpns->outer_plan) + dpns->outer_tlist = dpns->outer_plan->targetlist; + else + dpns->outer_tlist = NIL; + + /* + * For a SubqueryScan, pretend the subplan is INNER referent. (We don't + * use OUTER because that could someday conflict with the normal meaning.) + * Likewise, for a CteScan, pretend the subquery's plan is INNER referent. + * For a WorkTableScan, locate the parent RecursiveUnion plan node and use + * that as INNER referent. + * + * For MERGE, pretend the ModifyTable's source plan (its outer plan) is + * INNER referent. This is the join from the target relation to the data + * source, and all INNER_VAR Vars in other parts of the query refer to its + * targetlist. + * + * For ON CONFLICT .. UPDATE we just need the inner tlist to point to the + * excluded expression's tlist. (Similar to the SubqueryScan we don't want + * to reuse OUTER, it's used for RETURNING in some modify table cases, + * although not INSERT .. CONFLICT). + */ + if (IsA(plan, SubqueryScan)) + dpns->inner_plan = ((SubqueryScan *) plan)->subplan; + else if (IsA(plan, CteScan)) + dpns->inner_plan = list_nth(dpns->subplans, + ((CteScan *) plan)->ctePlanId - 1); + else if (IsA(plan, WorkTableScan)) + dpns->inner_plan = find_recursive_union(dpns, + (WorkTableScan *) plan); + else if (IsA(plan, ModifyTable)) + { + if (((ModifyTable *) plan)->operation == CMD_MERGE) + dpns->inner_plan = outerPlan(plan); + else + dpns->inner_plan = plan; + } + else + dpns->inner_plan = innerPlan(plan); + + if (IsA(plan, ModifyTable) && ((ModifyTable *) plan)->operation == CMD_INSERT) + dpns->inner_tlist = ((ModifyTable *) plan)->exclRelTlist; + else if (dpns->inner_plan) + dpns->inner_tlist = dpns->inner_plan->targetlist; + else + dpns->inner_tlist = NIL; + + /* Set up referent for INDEX_VAR Vars, if needed */ + if (IsA(plan, IndexOnlyScan)) + dpns->index_tlist = ((IndexOnlyScan *) plan)->indextlist; + else if (IsA(plan, ForeignScan)) + dpns->index_tlist = ((ForeignScan *) plan)->fdw_scan_tlist; + else if (IsA(plan, CustomScan)) + dpns->index_tlist = ((CustomScan *) plan)->custom_scan_tlist; + else + dpns->index_tlist = NIL; +} + +/* + * Locate the ancestor plan node that is the RecursiveUnion generating + * the WorkTableScan's work table. 
We can match on wtParam, since that + * should be unique within the plan tree. + */ +static Plan * +find_recursive_union(deparse_namespace *dpns, WorkTableScan *wtscan) +{ + ListCell *lc; + + foreach(lc, dpns->ancestors) + { + Plan *ancestor = (Plan *) lfirst(lc); + + if (IsA(ancestor, RecursiveUnion) && + ((RecursiveUnion *) ancestor)->wtParam == wtscan->wtParam) + return ancestor; + } + elog(ERROR, "could not find RecursiveUnion for WorkTableScan with wtParam %d", + wtscan->wtParam); + return NULL; +} + +/* + * push_child_plan: temporarily transfer deparsing attention to a child plan + * + * When expanding an OUTER_VAR or INNER_VAR reference, we must adjust the + * deparse context in case the referenced expression itself uses + * OUTER_VAR/INNER_VAR. We modify the top stack entry in-place to avoid + * affecting levelsup issues (although in a Plan tree there really shouldn't + * be any). + * + * Caller must provide a local deparse_namespace variable to save the + * previous state for pop_child_plan. + */ +static void +push_child_plan(deparse_namespace *dpns, Plan *plan, + deparse_namespace *save_dpns) +{ + /* Save state for restoration later */ + *save_dpns = *dpns; + + /* Link current plan node into ancestors list */ + dpns->ancestors = lcons(dpns->plan, dpns->ancestors); + + /* Set attention on selected child */ + set_deparse_plan(dpns, plan); +} + +/* + * pop_child_plan: undo the effects of push_child_plan + */ +static void +pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) +{ + List *ancestors; + + /* Get rid of ancestors list cell added by push_child_plan */ + ancestors = list_delete_first(dpns->ancestors); + + /* Restore fields changed by push_child_plan */ + *dpns = *save_dpns; + + /* Make sure dpns->ancestors is right (may be unnecessary) */ + dpns->ancestors = ancestors; +} + +/* + * push_ancestor_plan: temporarily transfer deparsing attention to an + * ancestor plan + * + * When expanding a Param reference, we must adjust the deparse context + * to match the plan node that contains the expression being printed; + * otherwise we'd fail if that expression itself contains a Param or + * OUTER_VAR/INNER_VAR/INDEX_VAR variable. + * + * The target ancestor is conveniently identified by the ListCell holding it + * in dpns->ancestors. + * + * Caller must provide a local deparse_namespace variable to save the + * previous state for pop_ancestor_plan. + */ +static void +push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, + deparse_namespace *save_dpns) +{ + Plan *plan = (Plan *) lfirst(ancestor_cell); + + /* Save state for restoration later */ + *save_dpns = *dpns; + + /* Build a new ancestor list with just this node's ancestors */ + dpns->ancestors = + list_copy_tail(dpns->ancestors, + list_cell_number(dpns->ancestors, ancestor_cell) + 1); + + /* Set attention on selected ancestor */ + set_deparse_plan(dpns, plan); +} + +/* + * pop_ancestor_plan: undo the effects of push_ancestor_plan + */ +static void +pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) +{ + /* Free the ancestor list made in push_ancestor_plan */ + list_free(dpns->ancestors); + + /* Restore fields changed by push_ancestor_plan */ + *dpns = *save_dpns; +} + +/* ---------- + * deparse_shard_query - Parse back a query for execution on a shard + * + * Builds an SQL string to perform the provided query on a specific shard and + * places this string into the provided buffer. 
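+ *
+ * For example, when deparsing a query on a distributed table "orders" for
+ * shardid 102008, the relation is printed with its shard suffix (roughly
+ * "orders_102008"), so the resulting SQL can run directly against that
+ * shard placement on a worker node.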
+ * ---------- + */ +void +deparse_shard_query(Query *query, Oid distrelid, int64 shardid, + StringInfo buffer) +{ + get_query_def_extended(query, buffer, NIL, distrelid, shardid, NULL, + false, + 0, WRAP_COLUMN_DEFAULT, 0); +} + +/* ---------- + * get_query_def - Parse back one query parsetree + * + * query: parsetree to be displayed + * buf: output text is appended to buf + * parentnamespace: list (initially empty) of outer-level deparse_namespace's + * resultDesc: if not NULL, the output tuple descriptor for the view + * represented by a SELECT query. We use the column names from it + * to label SELECT output columns, in preference to names in the query + * colNamesVisible: true if the surrounding context cares about the output + * column names at all (as, for example, an EXISTS() context does not); + * when false, we can suppress dummy column labels such as "?column?" + * prettyFlags: bitmask of PRETTYFLAG_XXX options + * wrapColumn: maximum line length, or -1 to disable wrapping + * startIndent: initial indentation amount + * ---------- + */ +static void +get_query_def(Query *query, StringInfo buf, List *parentnamespace, + TupleDesc resultDesc, bool colNamesVisible, + int prettyFlags, int wrapColumn, int startIndent) +{ + get_query_def_extended(query, buf, parentnamespace, InvalidOid, 0, resultDesc, + colNamesVisible, + prettyFlags, wrapColumn, startIndent); +} + +/* ---------- + * get_query_def_extended - Parse back one query parsetree, optionally + * with extension using a shard identifier. + * + * If distrelid is valid and shardid is positive, the provided shardid is added + * any time the provided relid is deparsed, so that the query may be executed + * on a placement for the given shard. + * ---------- + */ +static void +get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace, + Oid distrelid, int64 shardid, TupleDesc resultDesc, + bool colNamesVisible, + int prettyFlags, int wrapColumn, int startIndent) +{ + deparse_context context; + deparse_namespace dpns; + + /* Guard against excessively long or deeply-nested queries */ + CHECK_FOR_INTERRUPTS(); + check_stack_depth(); + + /* + * Before we begin to examine the query, acquire locks on referenced + * relations, and fix up deleted columns in JOIN RTEs. This ensures + * consistent results. Note we assume it's OK to scribble on the passed + * querytree! + * + * We are only deparsing the query (we are not about to execute it), so we + * only need AccessShareLock on the relations it mentions. + */ + AcquireRewriteLocks(query, false, false); + + /* + * Set search_path to NIL so that all objects outside of pg_catalog will be + * schema-prefixed. pg_catalog will be added automatically when we call + * PushEmptySearchPath(). 
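+ *
+ * For example, a reference to a table "t" living in schema "public" is
+ * then deparsed as "public.t" rather than bare "t".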
+ */ + int saveNestLevel = PushEmptySearchPath(); + + context.buf = buf; + context.namespaces = lcons(&dpns, list_copy(parentnamespace)); + context.windowClause = NIL; + context.windowTList = NIL; + context.varprefix = (parentnamespace != NIL || + list_length(query->rtable) != 1); + context.prettyFlags = prettyFlags; + context.wrapColumn = wrapColumn; + context.indentLevel = startIndent; + context.special_exprkind = EXPR_KIND_NONE; + context.appendparents = NULL; + context.distrelid = distrelid; + context.shardid = shardid; + + set_deparse_for_query(&dpns, query, parentnamespace); + + switch (query->commandType) + { + case CMD_SELECT: + get_select_query_def(query, &context, resultDesc, colNamesVisible); + break; + + case CMD_UPDATE: + get_update_query_def(query, &context, colNamesVisible); + break; + + case CMD_INSERT: + get_insert_query_def(query, &context, colNamesVisible); + break; + + case CMD_DELETE: + get_delete_query_def(query, &context, colNamesVisible); + break; + + case CMD_MERGE: + get_merge_query_def(query, &context, colNamesVisible); + break; + + case CMD_NOTHING: + appendStringInfoString(buf, "NOTHING"); + break; + + case CMD_UTILITY: + get_utility_query_def(query, &context); + break; + + default: + elog(ERROR, "unrecognized query command type: %d", + query->commandType); + break; + } + + /* revert back to original search_path */ + PopEmptySearchPath(saveNestLevel); +} + +/* ---------- + * get_values_def - Parse back a VALUES list + * ---------- + */ +static void +get_values_def(List *values_lists, deparse_context *context) +{ + StringInfo buf = context->buf; + bool first_list = true; + ListCell *vtl; + + appendStringInfoString(buf, "VALUES "); + + foreach(vtl, values_lists) + { + List *sublist = (List *) lfirst(vtl); + bool first_col = true; + ListCell *lc; + + if (first_list) + first_list = false; + else + appendStringInfoString(buf, ", "); + + appendStringInfoChar(buf, '('); + foreach(lc, sublist) + { + Node *col = (Node *) lfirst(lc); + + if (first_col) + first_col = false; + else + appendStringInfoChar(buf, ','); + + /* + * Print the value. Whole-row Vars need special treatment. 
+ */ + get_rule_expr_toplevel(col, context, false); + } + appendStringInfoChar(buf, ')'); + } +} + +/* ---------- + * get_with_clause - Parse back a WITH clause + * ---------- + */ +static void +get_with_clause(Query *query, deparse_context *context) +{ + StringInfo buf = context->buf; + const char *sep; + ListCell *l; + + if (query->cteList == NIL) + return; + + if (PRETTY_INDENT(context)) + { + context->indentLevel += PRETTYINDENT_STD; + appendStringInfoChar(buf, ' '); + } + + if (query->hasRecursive) + sep = "WITH RECURSIVE "; + else + sep = "WITH "; + foreach(l, query->cteList) + { + CommonTableExpr *cte = (CommonTableExpr *) lfirst(l); + + appendStringInfoString(buf, sep); + appendStringInfoString(buf, quote_identifier(cte->ctename)); + if (cte->aliascolnames) + { + bool first = true; + ListCell *col; + + appendStringInfoChar(buf, '('); + foreach(col, cte->aliascolnames) + { + if (first) + first = false; + else + appendStringInfoString(buf, ", "); + appendStringInfoString(buf, + quote_identifier(strVal(lfirst(col)))); + } + appendStringInfoChar(buf, ')'); + } + appendStringInfoString(buf, " AS "); + switch (cte->ctematerialized) + { + case CTEMaterializeDefault: + break; + case CTEMaterializeAlways: + appendStringInfoString(buf, "MATERIALIZED "); + break; + case CTEMaterializeNever: + appendStringInfoString(buf, "NOT MATERIALIZED "); + break; + } + appendStringInfoChar(buf, '('); + if (PRETTY_INDENT(context)) + appendContextKeyword(context, "", 0, 0, 0); + get_query_def((Query *) cte->ctequery, buf, context->namespaces, NULL, + true, + context->prettyFlags, context->wrapColumn, + context->indentLevel); + if (PRETTY_INDENT(context)) + appendContextKeyword(context, "", 0, 0, 0); + appendStringInfoChar(buf, ')'); + + if (cte->search_clause) + { + bool first = true; + ListCell *lc; + + appendStringInfo(buf, " SEARCH %s FIRST BY ", + cte->search_clause->search_breadth_first ? 
"BREADTH" : "DEPTH"); + + foreach(lc, cte->search_clause->search_col_list) + { + if (first) + first = false; + else + appendStringInfoString(buf, ", "); + appendStringInfoString(buf, + quote_identifier(strVal(lfirst(lc)))); + } + + appendStringInfo(buf, " SET %s", quote_identifier(cte->search_clause->search_seq_column)); + } + + if (cte->cycle_clause) + { + bool first = true; + ListCell *lc; + + appendStringInfoString(buf, " CYCLE "); + + foreach(lc, cte->cycle_clause->cycle_col_list) + { + if (first) + first = false; + else + appendStringInfoString(buf, ", "); + appendStringInfoString(buf, + quote_identifier(strVal(lfirst(lc)))); + } + + appendStringInfo(buf, " SET %s", quote_identifier(cte->cycle_clause->cycle_mark_column)); + + { + Const *cmv = castNode(Const, cte->cycle_clause->cycle_mark_value); + Const *cmd = castNode(Const, cte->cycle_clause->cycle_mark_default); + + if (!(cmv->consttype == BOOLOID && !cmv->constisnull && DatumGetBool(cmv->constvalue) == true && + cmd->consttype == BOOLOID && !cmd->constisnull && DatumGetBool(cmd->constvalue) == false)) + { + appendStringInfoString(buf, " TO "); + get_rule_expr(cte->cycle_clause->cycle_mark_value, context, false); + appendStringInfoString(buf, " DEFAULT "); + get_rule_expr(cte->cycle_clause->cycle_mark_default, context, false); + } + } + + appendStringInfo(buf, " USING %s", quote_identifier(cte->cycle_clause->cycle_path_column)); + } + + sep = ", "; + } + + if (PRETTY_INDENT(context)) + { + context->indentLevel -= PRETTYINDENT_STD; + appendContextKeyword(context, "", 0, 0, 0); + } + else + appendStringInfoChar(buf, ' '); +} + +/* ---------- + * get_select_query_def - Parse back a SELECT parsetree + * ---------- + */ +static void +get_select_query_def(Query *query, deparse_context *context, + TupleDesc resultDesc, bool colNamesVisible) +{ + StringInfo buf = context->buf; + List *save_windowclause; + List *save_windowtlist; + bool force_colno; + ListCell *l; + + /* Insert the WITH clause if given */ + get_with_clause(query, context); + + /* Set up context for possible window functions */ + save_windowclause = context->windowClause; + context->windowClause = query->windowClause; + save_windowtlist = context->windowTList; + context->windowTList = query->targetList; + + /* + * If the Query node has a setOperations tree, then it's the top level of + * a UNION/INTERSECT/EXCEPT query; only the WITH, ORDER BY and LIMIT + * fields are interesting in the top query itself. + */ + if (query->setOperations) + { + get_setop_query(query->setOperations, query, context, resultDesc, + colNamesVisible); + /* ORDER BY clauses must be simple in this case */ + force_colno = true; + } + else + { + get_basic_select_query(query, context, resultDesc, colNamesVisible); + force_colno = false; + } + + /* Add the ORDER BY clause if given */ + if (query->sortClause != NIL) + { + appendContextKeyword(context, " ORDER BY ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + get_rule_orderby(query->sortClause, query->targetList, + force_colno, context); + } + + /* + * Add the LIMIT/OFFSET clauses if given. If non-default options, use the + * standard spelling of LIMIT. 
+ */ + if (query->limitOffset != NULL) + { + appendContextKeyword(context, " OFFSET ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + get_rule_expr(query->limitOffset, context, false); + } + if (query->limitCount != NULL) + { + if (query->limitOption == LIMIT_OPTION_WITH_TIES) + { + // had to add '(' and ')' here because it fails with casting + appendContextKeyword(context, " FETCH FIRST (", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + get_rule_expr(query->limitCount, context, false); + appendStringInfoString(buf, ") ROWS WITH TIES"); + } + else + { + appendContextKeyword(context, " LIMIT ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + if (IsA(query->limitCount, Const) && + ((Const *) query->limitCount)->constisnull) + appendStringInfoString(buf, "ALL"); + else + get_rule_expr(query->limitCount, context, false); + } + } + + /* Add FOR [KEY] UPDATE/SHARE clauses if present */ + if (query->hasForUpdate) + { + foreach(l, query->rowMarks) + { + RowMarkClause *rc = (RowMarkClause *) lfirst(l); + + /* don't print implicit clauses */ + if (rc->pushedDown) + continue; + + switch (rc->strength) + { + case LCS_NONE: + /* we intentionally throw an error for LCS_NONE */ + elog(ERROR, "unrecognized LockClauseStrength %d", + (int) rc->strength); + break; + case LCS_FORKEYSHARE: + appendContextKeyword(context, " FOR KEY SHARE", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + break; + case LCS_FORSHARE: + appendContextKeyword(context, " FOR SHARE", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + break; + case LCS_FORNOKEYUPDATE: + appendContextKeyword(context, " FOR NO KEY UPDATE", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + break; + case LCS_FORUPDATE: + appendContextKeyword(context, " FOR UPDATE", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + break; + } + + appendStringInfo(buf, " OF %s", + quote_identifier(get_rtable_name(rc->rti, + context))); + if (rc->waitPolicy == LockWaitError) + appendStringInfoString(buf, " NOWAIT"); + else if (rc->waitPolicy == LockWaitSkip) + appendStringInfoString(buf, " SKIP LOCKED"); + } + } + + context->windowClause = save_windowclause; + context->windowTList = save_windowtlist; +} + +/* + * Detect whether query looks like SELECT ... FROM VALUES(); + * if so, return the VALUES RTE. Otherwise return NULL. + */ +static RangeTblEntry * +get_simple_values_rte(Query *query, TupleDesc resultDesc) +{ + RangeTblEntry *result = NULL; + ListCell *lc; + int colno; + + /* + * We want to return true even if the Query also contains OLD or NEW rule + * RTEs. So the idea is to scan the rtable and see if there is only one + * inFromCl RTE that is a VALUES RTE. + */ + foreach(lc, query->rtable) + { + RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); + + if (rte->rtekind == RTE_VALUES && rte->inFromCl) + { + if (result) + return NULL; /* multiple VALUES (probably not possible) */ + result = rte; + } + else if (rte->rtekind == RTE_RELATION && !rte->inFromCl) + continue; /* ignore rule entries */ + else + return NULL; /* something else -> not simple VALUES */ + } + + /* + * We don't need to check the targetlist in any great detail, because + * parser/analyze.c will never generate a "bare" VALUES RTE --- they only + * appear inside auto-generated sub-queries with very restricted + * structure. However, DefineView might have modified the tlist by + * injecting new column aliases; so compare tlist resnames against the + * RTE's names to detect that. 
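+ *
+ * For example, "CREATE VIEW v (a, b) AS VALUES (1, 2)" renames the output
+ * columns, so the comparison below fails and the query is printed as a
+ * full "SELECT ... FROM (VALUES ...)" rather than a bare VALUES list.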
+ */ + if (result) + { + ListCell *lcn; + + if (list_length(query->targetList) != list_length(result->eref->colnames)) + return NULL; /* this probably cannot happen */ + colno = 0; + forboth(lc, query->targetList, lcn, result->eref->colnames) + { + TargetEntry *tle = (TargetEntry *) lfirst(lc); + char *cname = strVal(lfirst(lcn)); + char *colname; + + if (tle->resjunk) + return NULL; /* this probably cannot happen */ + /* compute name that get_target_list would use for column */ + colno++; + if (resultDesc && colno <= resultDesc->natts) + colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname); + else + colname = tle->resname; + + /* does it match the VALUES RTE? */ + if (colname == NULL || strcmp(colname, cname) != 0) + return NULL; /* column name has been changed */ + } + } + + return result; +} + +static void +get_basic_select_query(Query *query, deparse_context *context, + TupleDesc resultDesc, bool colNamesVisible) +{ + StringInfo buf = context->buf; + RangeTblEntry *values_rte; + char *sep; + ListCell *l; + + if (PRETTY_INDENT(context)) + { + context->indentLevel += PRETTYINDENT_STD; + appendStringInfoChar(buf, ' '); + } + + /* + * If the query looks like SELECT * FROM (VALUES ...), then print just the + * VALUES part. This reverses what transformValuesClause() did at parse + * time. + */ + values_rte = get_simple_values_rte(query, resultDesc); + if (values_rte) + { + get_values_def(values_rte->values_lists, context); + return; + } + + /* + * Build up the query string - first we say SELECT + */ + if (query->isReturn) + appendStringInfoString(buf, "RETURN"); + else + appendStringInfoString(buf, "SELECT"); + + /* Add the DISTINCT clause if given */ + if (query->distinctClause != NIL) + { + if (query->hasDistinctOn) + { + appendStringInfoString(buf, " DISTINCT ON ("); + sep = ""; + foreach(l, query->distinctClause) + { + SortGroupClause *srt = (SortGroupClause *) lfirst(l); + + appendStringInfoString(buf, sep); + get_rule_sortgroupclause(srt->tleSortGroupRef, query->targetList, + false, context); + sep = ", "; + } + appendStringInfoChar(buf, ')'); + } + else + appendStringInfoString(buf, " DISTINCT"); + } + + /* Then we tell what to select (the targetlist) */ + get_target_list(query->targetList, context, resultDesc, colNamesVisible); + + /* Add the FROM clause if needed */ + get_from_clause(query, " FROM ", context); + + /* Add the WHERE clause if given */ + if (query->jointree->quals != NULL) + { + appendContextKeyword(context, " WHERE ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + get_rule_expr(query->jointree->quals, context, false); + } + + /* Add the GROUP BY clause if given */ + if (query->groupClause != NULL || query->groupingSets != NULL) + { + ParseExprKind save_exprkind; + + appendContextKeyword(context, " GROUP BY ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + if (query->groupDistinct) + appendStringInfoString(buf, "DISTINCT "); + + save_exprkind = context->special_exprkind; + context->special_exprkind = EXPR_KIND_GROUP_BY; + + if (query->groupingSets == NIL) + { + sep = ""; + foreach(l, query->groupClause) + { + SortGroupClause *grp = (SortGroupClause *) lfirst(l); + + appendStringInfoString(buf, sep); + get_rule_sortgroupclause(grp->tleSortGroupRef, query->targetList, + false, context); + sep = ", "; + } + } + else + { + sep = ""; + foreach(l, query->groupingSets) + { + GroupingSet *grp = lfirst(l); + + appendStringInfoString(buf, sep); + get_rule_groupingset(grp, query->targetList, true, context); + sep = ", "; + } + } + + context->special_exprkind = 
save_exprkind; + } + + /* Add the HAVING clause if given */ + if (query->havingQual != NULL) + { + appendContextKeyword(context, " HAVING ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + get_rule_expr(query->havingQual, context, false); + } + + /* Add the WINDOW clause if needed */ + if (query->windowClause != NIL) + get_rule_windowclause(query, context); +} + +/* ---------- + * get_target_list - Parse back a SELECT target list + * + * This is also used for RETURNING lists in INSERT/UPDATE/DELETE/MERGE. + * + * resultDesc and colNamesVisible are as for get_query_def() + * ---------- + */ +static void +get_target_list(List *targetList, deparse_context *context, + TupleDesc resultDesc, bool colNamesVisible) +{ + StringInfo buf = context->buf; + StringInfoData targetbuf; + bool last_was_multiline = false; + char *sep; + int colno; + ListCell *l; + + /* we use targetbuf to hold each TLE's text temporarily */ + initStringInfo(&targetbuf); + + sep = " "; + colno = 0; + foreach(l, targetList) + { + TargetEntry *tle = (TargetEntry *) lfirst(l); + char *colname; + char *attname; + + if (tle->resjunk) + continue; /* ignore junk entries */ + + appendStringInfoString(buf, sep); + sep = ", "; + colno++; + + /* + * Put the new field text into targetbuf so we can decide after we've + * got it whether or not it needs to go on a new line. + */ + resetStringInfo(&targetbuf); + context->buf = &targetbuf; + + /* + * We special-case Var nodes rather than using get_rule_expr. This is + * needed because get_rule_expr will display a whole-row Var as + * "foo.*", which is the preferred notation in most contexts, but at + * the top level of a SELECT list it's not right (the parser will + * expand that notation into multiple columns, yielding behavior + * different from a whole-row Var). We need to call get_variable + * directly so that we can tell it to do the right thing, and so that + * we can get the attribute name which is the default AS label. + */ + if (tle->expr && (IsA(tle->expr, Var))) + { + attname = get_variable((Var *) tle->expr, 0, true, context); + } + else + { + get_rule_expr((Node *) tle->expr, context, true); + + /* + * When colNamesVisible is true, we should always show the + * assigned column name explicitly. Otherwise, show it only if + * it's not FigureColname's fallback. + */ + attname = colNamesVisible ? NULL : "?column?"; + } + + /* + * Figure out what the result column should be called. In the context + * of a view, use the view's tuple descriptor (so as to pick up the + * effects of any column RENAME that's been done on the view). + * Otherwise, just use what we can find in the TLE. + */ + if (resultDesc && colno <= resultDesc->natts) + colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname); + else + colname = tle->resname; + + /* Show AS unless the column's name is correct as-is */ + if (colname) /* resname could be NULL */ + { + if (attname == NULL || strcmp(attname, colname) != 0) + appendStringInfo(&targetbuf, " AS %s", quote_identifier(colname)); + } + + /* Restore context's output buffer */ + context->buf = buf; + + /* Consider line-wrapping if enabled */ + if (PRETTY_INDENT(context) && context->wrapColumn >= 0) + { + int leading_nl_pos; + + /* Does the new field start with a new line? 
*/ + if (targetbuf.len > 0 && targetbuf.data[0] == '\n') + leading_nl_pos = 0; + else + leading_nl_pos = -1; + + /* If so, we shouldn't add anything */ + if (leading_nl_pos >= 0) + { + /* instead, remove any trailing spaces currently in buf */ + removeStringInfoSpaces(buf); + } + else + { + char *trailing_nl; + + /* Locate the start of the current line in the output buffer */ + trailing_nl = strrchr(buf->data, '\n'); + if (trailing_nl == NULL) + trailing_nl = buf->data; + else + trailing_nl++; + + /* + * Add a newline, plus some indentation, if the new field is + * not the first and either the new field would cause an + * overflow or the last field used more than one line. + */ + if (colno > 1 && + ((strlen(trailing_nl) + targetbuf.len > context->wrapColumn) || + last_was_multiline)) + appendContextKeyword(context, "", -PRETTYINDENT_STD, + PRETTYINDENT_STD, PRETTYINDENT_VAR); + } + + /* Remember this field's multiline status for next iteration */ + last_was_multiline = + (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL); + } + + /* Add the new field */ + appendStringInfoString(buf, targetbuf.data); + } + + /* clean up */ + pfree(targetbuf.data); +} + +static void +get_setop_query(Node *setOp, Query *query, deparse_context *context, + TupleDesc resultDesc, bool colNamesVisible) +{ + StringInfo buf = context->buf; + bool need_paren; + + /* Guard against excessively long or deeply-nested queries */ + CHECK_FOR_INTERRUPTS(); + check_stack_depth(); + + if (IsA(setOp, RangeTblRef)) + { + RangeTblRef *rtr = (RangeTblRef *) setOp; + RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable); + Query *subquery = rte->subquery; + + Assert(subquery != NULL); + Assert(subquery->setOperations == NULL); + /* Need parens if WITH, ORDER BY, FOR UPDATE, or LIMIT; see gram.y */ + need_paren = (subquery->cteList || + subquery->sortClause || + subquery->rowMarks || + subquery->limitOffset || + subquery->limitCount); + if (need_paren) + appendStringInfoChar(buf, '('); + get_query_def(subquery, buf, context->namespaces, resultDesc, + colNamesVisible, + context->prettyFlags, context->wrapColumn, + context->indentLevel); + if (need_paren) + appendStringInfoChar(buf, ')'); + } + else if (IsA(setOp, SetOperationStmt)) + { + SetOperationStmt *op = (SetOperationStmt *) setOp; + int subindent; + + /* + * We force parens when nesting two SetOperationStmts, except when the + * lefthand input is another setop of the same kind. Syntactically, + * we could omit parens in rather more cases, but it seems best to use + * parens to flag cases where the setop operator changes. If we use + * parens, we also increase the indentation level for the child query. + * + * There are some cases in which parens are needed around a leaf query + * too, but those are more easily handled at the next level down (see + * code above). 
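+ *
+ * For example, "SELECT 1 UNION SELECT 2 UNION SELECT 3" needs no added
+ * parentheses, but the left input of
+ * "(SELECT 1 UNION SELECT 2) INTERSECT SELECT 3" is parenthesized because
+ * the set operator changes.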
+ */ + if (IsA(op->larg, SetOperationStmt)) + { + SetOperationStmt *lop = (SetOperationStmt *) op->larg; + + if (op->op == lop->op && op->all == lop->all) + need_paren = false; + else + need_paren = true; + } + else + need_paren = false; + + if (need_paren) + { + appendStringInfoChar(buf, '('); + subindent = PRETTYINDENT_STD; + appendContextKeyword(context, "", subindent, 0, 0); + } + else + subindent = 0; + + get_setop_query(op->larg, query, context, resultDesc, colNamesVisible); + + if (need_paren) + appendContextKeyword(context, ") ", -subindent, 0, 0); + else if (PRETTY_INDENT(context)) + appendContextKeyword(context, "", -subindent, 0, 0); + else + appendStringInfoChar(buf, ' '); + + switch (op->op) + { + case SETOP_UNION: + appendStringInfoString(buf, "UNION "); + break; + case SETOP_INTERSECT: + appendStringInfoString(buf, "INTERSECT "); + break; + case SETOP_EXCEPT: + appendStringInfoString(buf, "EXCEPT "); + break; + default: + elog(ERROR, "unrecognized set op: %d", + (int) op->op); + } + if (op->all) + appendStringInfoString(buf, "ALL "); + + /* Always parenthesize if RHS is another setop */ + need_paren = IsA(op->rarg, SetOperationStmt); + + /* + * The indentation code here is deliberately a bit different from that + * for the lefthand input, because we want the line breaks in + * different places. + */ + if (need_paren) + { + appendStringInfoChar(buf, '('); + subindent = PRETTYINDENT_STD; + } + else + subindent = 0; + appendContextKeyword(context, "", subindent, 0, 0); + + get_setop_query(op->rarg, query, context, resultDesc, false); + + if (PRETTY_INDENT(context)) + context->indentLevel -= subindent; + if (need_paren) + appendContextKeyword(context, ")", 0, 0, 0); + } + else + { + elog(ERROR, "unrecognized node type: %d", + (int) nodeTag(setOp)); + } +} + +/* + * Display a sort/group clause. + * + * Also returns the expression tree, so caller need not find it again. + */ +static Node * +get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno, + deparse_context *context) +{ + StringInfo buf = context->buf; + TargetEntry *tle; + Node *expr; + + tle = get_sortgroupref_tle(ref, tlist); + expr = (Node *) tle->expr; + + /* + * Use column-number form if requested by caller. Otherwise, if + * expression is a constant, force it to be dumped with an explicit cast + * as decoration --- this is because a simple integer constant is + * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we + * dump it without any decoration. If it's anything more complex than a + * simple Var, then force extra parens around it, to ensure it can't be + * misinterpreted as a cube() or rollup() construct. + */ + if (force_colno) + { + Assert(!tle->resjunk); + appendStringInfo(buf, "%d", tle->resno); + } + else if (expr && IsA(expr, Const)) + get_const_expr((Const *) expr, context, 1); + else if (!expr || IsA(expr, Var)) + get_rule_expr(expr, context, true); + else + { + /* + * We must force parens for function-like expressions even if + * PRETTY_PAREN is off, since those are the ones in danger of + * misparsing. For other expressions we need to force them only if + * PRETTY_PAREN is on, since otherwise the expression will output them + * itself. (We can't skip the parens.) 
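+ *
+ * For example, a constant 2 is printed with an explicit cast (say,
+ * "2::integer") so it cannot be read back as an output-column number, and
+ * a call to a function that happens to be named cube, e.g. cube(x), is
+ * printed as "(cube(x))" so it is not taken for the CUBE grouping construct.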
+ */ + bool need_paren = (PRETTY_PAREN(context) + || IsA(expr, FuncExpr) + || IsA(expr, Aggref) + || IsA(expr, WindowFunc) + || IsA(expr, JsonConstructorExpr)); + + if (need_paren) + appendStringInfoChar(context->buf, '('); + get_rule_expr(expr, context, true); + if (need_paren) + appendStringInfoChar(context->buf, ')'); + } + + return expr; +} + +/* + * Display a GroupingSet + */ +static void +get_rule_groupingset(GroupingSet *gset, List *targetlist, + bool omit_parens, deparse_context *context) +{ + ListCell *l; + StringInfo buf = context->buf; + bool omit_child_parens = true; + char *sep = ""; + + switch (gset->kind) + { + case GROUPING_SET_EMPTY: + appendStringInfoString(buf, "()"); + return; + + case GROUPING_SET_SIMPLE: + { + if (!omit_parens || list_length(gset->content) != 1) + appendStringInfoChar(buf, '('); + + foreach(l, gset->content) + { + Index ref = lfirst_int(l); + + appendStringInfoString(buf, sep); + get_rule_sortgroupclause(ref, targetlist, + false, context); + sep = ", "; + } + + if (!omit_parens || list_length(gset->content) != 1) + appendStringInfoChar(buf, ')'); + } + return; + + case GROUPING_SET_ROLLUP: + appendStringInfoString(buf, "ROLLUP("); + break; + case GROUPING_SET_CUBE: + appendStringInfoString(buf, "CUBE("); + break; + case GROUPING_SET_SETS: + appendStringInfoString(buf, "GROUPING SETS ("); + omit_child_parens = false; + break; + } + + foreach(l, gset->content) + { + appendStringInfoString(buf, sep); + get_rule_groupingset(lfirst(l), targetlist, omit_child_parens, context); + sep = ", "; + } + + appendStringInfoChar(buf, ')'); +} + +/* + * Display an ORDER BY list. + */ +static void +get_rule_orderby(List *orderList, List *targetList, + bool force_colno, deparse_context *context) +{ + StringInfo buf = context->buf; + const char *sep; + ListCell *l; + + sep = ""; + foreach(l, orderList) + { + SortGroupClause *srt = (SortGroupClause *) lfirst(l); + Node *sortexpr; + Oid sortcoltype; + TypeCacheEntry *typentry; + + appendStringInfoString(buf, sep); + sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, + force_colno, context); + sortcoltype = exprType(sortexpr); + /* See whether operator is default < or > for datatype */ + typentry = lookup_type_cache(sortcoltype, + TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); + if (srt->sortop == typentry->lt_opr) + { + /* ASC is default, so emit nothing for it */ + if (srt->nulls_first) + appendStringInfoString(buf, " NULLS FIRST"); + } + else if (srt->sortop == typentry->gt_opr) + { + appendStringInfoString(buf, " DESC"); + /* DESC defaults to NULLS FIRST */ + if (!srt->nulls_first) + appendStringInfoString(buf, " NULLS LAST"); + } + else + { + appendStringInfo(buf, " USING %s", + generate_operator_name(srt->sortop, + sortcoltype, + sortcoltype)); + /* be specific to eliminate ambiguity */ + if (srt->nulls_first) + appendStringInfoString(buf, " NULLS FIRST"); + else + appendStringInfoString(buf, " NULLS LAST"); + } + sep = ", "; + } +} + +/* + * Display a WINDOW clause. + * + * Note that the windowClause list might contain only anonymous window + * specifications, in which case we should print nothing here. 
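+ *
+ * For example, "count(*) OVER (ORDER BY x)" only adds an unnamed entry and
+ * prints nothing here, whereas "... WINDOW w AS (ORDER BY x)" yields a
+ * named entry that is printed as "WINDOW w AS (ORDER BY x)".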
+ */ +static void +get_rule_windowclause(Query *query, deparse_context *context) +{ + StringInfo buf = context->buf; + const char *sep; + ListCell *l; + + sep = NULL; + foreach(l, query->windowClause) + { + WindowClause *wc = (WindowClause *) lfirst(l); + + if (wc->name == NULL) + continue; /* ignore anonymous windows */ + + if (sep == NULL) + appendContextKeyword(context, " WINDOW ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + else + appendStringInfoString(buf, sep); + + appendStringInfo(buf, "%s AS ", quote_identifier(wc->name)); + + get_rule_windowspec(wc, query->targetList, context); + + sep = ", "; + } +} + +/* + * Display a window definition + */ +static void +get_rule_windowspec(WindowClause *wc, List *targetList, + deparse_context *context) +{ + StringInfo buf = context->buf; + bool needspace = false; + const char *sep; + ListCell *l; + + appendStringInfoChar(buf, '('); + if (wc->refname) + { + appendStringInfoString(buf, quote_identifier(wc->refname)); + needspace = true; + } + /* partition clauses are always inherited, so only print if no refname */ + if (wc->partitionClause && !wc->refname) + { + if (needspace) + appendStringInfoChar(buf, ' '); + appendStringInfoString(buf, "PARTITION BY "); + sep = ""; + foreach(l, wc->partitionClause) + { + SortGroupClause *grp = (SortGroupClause *) lfirst(l); + + appendStringInfoString(buf, sep); + get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, + false, context); + sep = ", "; + } + needspace = true; + } + /* print ordering clause only if not inherited */ + if (wc->orderClause && !wc->copiedOrder) + { + if (needspace) + appendStringInfoChar(buf, ' '); + appendStringInfoString(buf, "ORDER BY "); + get_rule_orderby(wc->orderClause, targetList, false, context); + needspace = true; + } + /* framing clause is never inherited, so print unless it's default */ + if (wc->frameOptions & FRAMEOPTION_NONDEFAULT) + { + if (needspace) + appendStringInfoChar(buf, ' '); + if (wc->frameOptions & FRAMEOPTION_RANGE) + appendStringInfoString(buf, "RANGE "); + else if (wc->frameOptions & FRAMEOPTION_ROWS) + appendStringInfoString(buf, "ROWS "); + else if (wc->frameOptions & FRAMEOPTION_GROUPS) + appendStringInfoString(buf, "GROUPS "); + else + Assert(false); + if (wc->frameOptions & FRAMEOPTION_BETWEEN) + appendStringInfoString(buf, "BETWEEN "); + if (wc->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) + appendStringInfoString(buf, "UNBOUNDED PRECEDING "); + else if (wc->frameOptions & FRAMEOPTION_START_CURRENT_ROW) + appendStringInfoString(buf, "CURRENT ROW "); + else if (wc->frameOptions & FRAMEOPTION_START_OFFSET) + { + get_rule_expr(wc->startOffset, context, false); + if (wc->frameOptions & FRAMEOPTION_START_OFFSET_PRECEDING) + appendStringInfoString(buf, " PRECEDING "); + else if (wc->frameOptions & FRAMEOPTION_START_OFFSET_FOLLOWING) + appendStringInfoString(buf, " FOLLOWING "); + else + Assert(false); + } + else + Assert(false); + if (wc->frameOptions & FRAMEOPTION_BETWEEN) + { + appendStringInfoString(buf, "AND "); + if (wc->frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING) + appendStringInfoString(buf, "UNBOUNDED FOLLOWING "); + else if (wc->frameOptions & FRAMEOPTION_END_CURRENT_ROW) + appendStringInfoString(buf, "CURRENT ROW "); + else if (wc->frameOptions & FRAMEOPTION_END_OFFSET) + { + get_rule_expr(wc->endOffset, context, false); + if (wc->frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING) + appendStringInfoString(buf, " PRECEDING "); + else if (wc->frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING) + appendStringInfoString(buf, " 
FOLLOWING "); + else + Assert(false); + } + else + Assert(false); + } + if (wc->frameOptions & FRAMEOPTION_EXCLUDE_CURRENT_ROW) + appendStringInfoString(buf, "EXCLUDE CURRENT ROW "); + else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_GROUP) + appendStringInfoString(buf, "EXCLUDE GROUP "); + else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_TIES) + appendStringInfoString(buf, "EXCLUDE TIES "); + /* we will now have a trailing space; remove it */ + buf->len--; + } + appendStringInfoChar(buf, ')'); +} + +/* ---------- + * get_insert_query_def - Parse back an INSERT parsetree + * ---------- + */ +static void +get_insert_query_def(Query *query, deparse_context *context, + bool colNamesVisible) +{ + StringInfo buf = context->buf; + RangeTblEntry *select_rte = NULL; + RangeTblEntry *values_rte = NULL; + RangeTblEntry *rte; + ListCell *l; + List *strippedexprs = NIL; + + /* Insert the WITH clause if given */ + get_with_clause(query, context); + + /* + * If it's an INSERT ... SELECT or multi-row VALUES, there will be a + * single RTE for the SELECT or VALUES. Plain VALUES has neither. + */ + foreach(l, query->rtable) + { + rte = (RangeTblEntry *) lfirst(l); + + if (rte->rtekind == RTE_SUBQUERY) + { + if (select_rte) + elog(ERROR, "too many subquery RTEs in INSERT"); + select_rte = rte; + } + + if (rte->rtekind == RTE_VALUES) + { + if (values_rte) + elog(ERROR, "too many values RTEs in INSERT"); + values_rte = rte; + } + } + if (select_rte && values_rte) + elog(ERROR, "both subquery and values RTEs in INSERT"); + + /* + * Start the query with INSERT INTO relname + */ + rte = rt_fetch(query->resultRelation, query->rtable); + Assert(rte->rtekind == RTE_RELATION); + + if (PRETTY_INDENT(context)) + { + context->indentLevel += PRETTYINDENT_STD; + appendStringInfoChar(buf, ' '); + } + appendStringInfo(buf, "INSERT INTO %s", + generate_relation_or_shard_name(rte->relid, + context->distrelid, + context->shardid, NIL)); + + /* Print the relation alias, if needed; INSERT requires explicit AS */ + get_rte_alias(rte, query->resultRelation, true, context); + + /* always want a space here */ + appendStringInfoChar(buf, ' '); + + /* + * Add the insert-column-names list. Any indirection decoration needed on + * the column names can be inferred from the top targetlist. 
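+ *
+ * For example, "INSERT INTO t (composite_col.subfield, array_col[1]) ..."
+ * keeps that decoration in its targetlist; the helper below prints it as
+ * part of the column list and hands back the stripped value expressions.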
+ */ + if (query->targetList) + { + strippedexprs = get_insert_column_names_list(query->targetList, + buf, context, rte); + } + + if (query->override) + { + if (query->override == OVERRIDING_SYSTEM_VALUE) + appendStringInfoString(buf, "OVERRIDING SYSTEM VALUE "); + else if (query->override == OVERRIDING_USER_VALUE) + appendStringInfoString(buf, "OVERRIDING USER VALUE "); + } + + if (select_rte) + { + /* Add the SELECT */ + get_query_def(select_rte->subquery, buf, context->namespaces, NULL, + false, + context->prettyFlags, context->wrapColumn, + context->indentLevel); + } + else if (values_rte) + { + /* Add the multi-VALUES expression lists */ + get_values_def(values_rte->values_lists, context); + } + else if (strippedexprs) + { + /* Add the single-VALUES expression list */ + appendContextKeyword(context, "VALUES (", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); + get_rule_list_toplevel(strippedexprs, context, false); + appendStringInfoChar(buf, ')'); + } + else + { + /* No expressions, so it must be DEFAULT VALUES */ + appendStringInfoString(buf, "DEFAULT VALUES"); + } + + /* Add ON CONFLICT if present */ + if (query->onConflict) + { + OnConflictExpr *confl = query->onConflict; + + appendStringInfoString(buf, " ON CONFLICT"); + + if (confl->arbiterElems) + { + /* Add the single-VALUES expression list */ + appendStringInfoChar(buf, '('); + get_rule_expr((Node *) confl->arbiterElems, context, false); + appendStringInfoChar(buf, ')'); + + /* Add a WHERE clause (for partial indexes) if given */ + if (confl->arbiterWhere != NULL) + { + bool save_varprefix; + + /* + * Force non-prefixing of Vars, since parser assumes that they + * belong to target relation. WHERE clause does not use + * InferenceElem, so this is separately required. + */ + save_varprefix = context->varprefix; + context->varprefix = false; + + appendContextKeyword(context, " WHERE ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + get_rule_expr(confl->arbiterWhere, context, false); + + context->varprefix = save_varprefix; + } + } + else if (OidIsValid(confl->constraint)) + { + char *constraint = get_constraint_name(confl->constraint); + int64 shardId = context->shardid; + + if (shardId > 0) + { + AppendShardIdToName(&constraint, shardId); + } + + if (!constraint) + elog(ERROR, "cache lookup failed for constraint %u", + confl->constraint); + appendStringInfo(buf, " ON CONSTRAINT %s", + quote_identifier(constraint)); + } + + if (confl->action == ONCONFLICT_NOTHING) + { + appendStringInfoString(buf, " DO NOTHING"); + } + else + { + appendStringInfoString(buf, " DO UPDATE SET "); + /* Deparse targetlist */ + get_update_query_targetlist_def(query, confl->onConflictSet, + context, rte); + + /* Add a WHERE clause if given */ + if (confl->onConflictWhere != NULL) + { + appendContextKeyword(context, " WHERE ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + get_rule_expr(confl->onConflictWhere, context, false); + } + } + } + + /* Add RETURNING if present */ + if (query->returningList) + { + appendContextKeyword(context, " RETURNING", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + get_target_list(query->returningList, context, NULL, colNamesVisible); + } +} + +/* ---------- + * get_update_query_def - Parse back an UPDATE parsetree + * ---------- + */ +static void +get_update_query_def(Query *query, deparse_context *context, + bool colNamesVisible) +{ + StringInfo buf = context->buf; + RangeTblEntry *rte; + + /* Insert the WITH clause if given */ + get_with_clause(query, context); + + /* + * Start the query with UPDATE relname SET + */ + rte = 
rt_fetch(query->resultRelation, query->rtable); + + if (PRETTY_INDENT(context)) + { + appendStringInfoChar(buf, ' '); + context->indentLevel += PRETTYINDENT_STD; + } + + /* if it's a shard, do differently */ + if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) + { + char *fragmentSchemaName = NULL; + char *fragmentTableName = NULL; + + ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); + + /* use schema and table name from the remote alias */ + appendStringInfo(buf, "UPDATE %s%s", + only_marker(rte), + generate_fragment_name(fragmentSchemaName, fragmentTableName)); + + if(rte->eref != NULL) + appendStringInfo(buf, " %s", + quote_identifier(get_rtable_name(query->resultRelation, context))); + } + else + { + appendStringInfo(buf, "UPDATE %s%s", + only_marker(rte), + generate_relation_or_shard_name(rte->relid, + context->distrelid, + context->shardid, NIL)); + + /* Print the relation alias, if needed */ + get_rte_alias(rte, query->resultRelation, false, context); + } + + appendStringInfoString(buf, " SET "); + + /* Deparse targetlist */ + get_update_query_targetlist_def(query, query->targetList, context, rte); + + /* Add the FROM clause if needed */ + get_from_clause(query, " FROM ", context); + + /* Add a WHERE clause if given */ + if (query->jointree->quals != NULL) + { + appendContextKeyword(context, " WHERE ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + get_rule_expr(query->jointree->quals, context, false); + } + + /* Add RETURNING if present */ + if (query->returningList) + { + appendContextKeyword(context, " RETURNING", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + get_target_list(query->returningList, context, NULL, colNamesVisible); + } +} + +/* ---------- + * get_update_query_targetlist_def - Parse back an UPDATE targetlist + * ---------- + */ +static void +get_update_query_targetlist_def(Query *query, List *targetList, + deparse_context *context, RangeTblEntry *rte) +{ + StringInfo buf = context->buf; + ListCell *l; + ListCell *next_ma_cell; + int remaining_ma_columns; + const char *sep; + SubLink *cur_ma_sublink; + List *ma_sublinks; + + /* + * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks + * into a list. We expect them to appear, in ID order, in resjunk tlist + * entries. + */ + ma_sublinks = NIL; + if (query->hasSubLinks) /* else there can't be any */ + { + foreach(l, targetList) + { + TargetEntry *tle = (TargetEntry *) lfirst(l); + + if (tle->resjunk && IsA(tle->expr, SubLink)) + { + SubLink *sl = (SubLink *) tle->expr; + + if (sl->subLinkType == MULTIEXPR_SUBLINK) + { + ma_sublinks = lappend(ma_sublinks, sl); + Assert(sl->subLinkId == list_length(ma_sublinks)); + } + } + } + } + next_ma_cell = list_head(ma_sublinks); + cur_ma_sublink = NULL; + remaining_ma_columns = 0; + + /* Add the comma separated list of 'attname = value' */ + sep = ""; + foreach(l, targetList) + { + TargetEntry *tle = (TargetEntry *) lfirst(l); + Node *expr; + + if (tle->resjunk) + continue; /* ignore junk entries */ + + /* Emit separator (OK whether we're in multiassignment or not) */ + appendStringInfoString(buf, sep); + sep = ", "; + + /* + * Check to see if we're starting a multiassignment group: if so, + * output a left paren. + */ + if (next_ma_cell != NULL && cur_ma_sublink == NULL) + { + /* + * We must dig down into the expr to see if it's a PARAM_MULTIEXPR + * Param. That could be buried under FieldStores and + * SubscriptingRefs and CoerceToDomains (cf processIndirection()), + * and underneath those there could be an implicit type coercion. 
+ * Because we would ignore implicit type coercions anyway, we + * don't need to be as careful as processIndirection() is about + * descending past implicit CoerceToDomains. + */ + expr = (Node *) tle->expr; + while (expr) + { + if (IsA(expr, FieldStore)) + { + FieldStore *fstore = (FieldStore *) expr; + + expr = (Node *) linitial(fstore->newvals); + } + else if (IsA(expr, SubscriptingRef)) + { + SubscriptingRef *sbsref = (SubscriptingRef *) expr; + + if (sbsref->refassgnexpr == NULL) + break; + expr = (Node *) sbsref->refassgnexpr; + } + else if (IsA(expr, CoerceToDomain)) + { + CoerceToDomain *cdomain = (CoerceToDomain *) expr; + + if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) + break; + expr = (Node *) cdomain->arg; + } + else + break; + } + expr = strip_implicit_coercions(expr); + + if (expr && IsA(expr, Param) && + ((Param *) expr)->paramkind == PARAM_MULTIEXPR) + { + cur_ma_sublink = (SubLink *) lfirst(next_ma_cell); + next_ma_cell = lnext(ma_sublinks, next_ma_cell); + remaining_ma_columns = count_nonjunk_tlist_entries( + ((Query *) cur_ma_sublink->subselect)->targetList); + Assert(((Param *) expr)->paramid == + ((cur_ma_sublink->subLinkId << 16) | 1)); + appendStringInfoChar(buf, '('); + } + } + + /* + * Put out name of target column; look in the catalogs, not at + * tle->resname, since resname will fail to track RENAME. + */ + appendStringInfoString(buf, + quote_identifier(get_attname(rte->relid, + tle->resno, + false))); + + /* + * Print any indirection needed (subfields or subscripts), and strip + * off the top-level nodes representing the indirection assignments. + */ + expr = processIndirection((Node *) tle->expr, context); + + /* + * If we're in a multiassignment, skip printing anything more, unless + * this is the last column; in which case, what we print should be the + * sublink, not the Param. 
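+ *
+ * For example, "UPDATE t SET (a, b) = (SELECT x, y FROM s)" is printed
+ * with one parenthesized column list followed by the sub-SELECT,
+ * "(a, b) = (SELECT ...)", rather than one assignment per Param.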
+ */ + if (cur_ma_sublink != NULL) + { + if (--remaining_ma_columns > 0) + continue; /* not the last column of multiassignment */ + appendStringInfoChar(buf, ')'); + expr = (Node *) cur_ma_sublink; + cur_ma_sublink = NULL; + } + + appendStringInfoString(buf, " = "); + + get_rule_expr(expr, context, false); + } +} + +/* ---------- + * get_delete_query_def - Parse back a DELETE parsetree + * ---------- + */ +static void +get_delete_query_def(Query *query, deparse_context *context, + bool colNamesVisible) +{ + StringInfo buf = context->buf; + RangeTblEntry *rte; + + /* Insert the WITH clause if given */ + get_with_clause(query, context); + + /* + * Start the query with DELETE FROM relname + */ + rte = rt_fetch(query->resultRelation, query->rtable); + + if (PRETTY_INDENT(context)) + { + appendStringInfoChar(buf, ' '); + context->indentLevel += PRETTYINDENT_STD; + } + + /* if it's a shard, do differently */ + if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) + { + char *fragmentSchemaName = NULL; + char *fragmentTableName = NULL; + + ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); + + /* use schema and table name from the remote alias */ + appendStringInfo(buf, "DELETE FROM %s%s", + only_marker(rte), + generate_fragment_name(fragmentSchemaName, fragmentTableName)); + + if(rte->eref != NULL) + appendStringInfo(buf, " %s", + quote_identifier(get_rtable_name(query->resultRelation, context))); + } + else + { + appendStringInfo(buf, "DELETE FROM %s%s", + only_marker(rte), + generate_relation_or_shard_name(rte->relid, + context->distrelid, + context->shardid, NIL)); + + /* Print the relation alias, if needed */ + get_rte_alias(rte, query->resultRelation, false, context); + } + + /* Add the USING clause if given */ + get_from_clause(query, " USING ", context); + + /* Add a WHERE clause if given */ + if (query->jointree->quals != NULL) + { + appendContextKeyword(context, " WHERE ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + get_rule_expr(query->jointree->quals, context, false); + } + + /* Add RETURNING if present */ + if (query->returningList) + { + appendContextKeyword(context, " RETURNING", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + get_target_list(query->returningList, context, NULL, colNamesVisible); + } +} + + +/* ---------- + * get_merge_query_def - Parse back a MERGE parsetree + * ---------- + */ +static void +get_merge_query_def(Query *query, deparse_context *context, + bool colNamesVisible) +{ + StringInfo buf = context->buf; + RangeTblEntry *rte; + ListCell *lc; + bool haveNotMatchedBySource; + + /* Insert the WITH clause if given */ + get_with_clause(query, context); + + /* + * Start the query with MERGE INTO relname + */ + rte = ExtractResultRelationRTE(query); + + if (PRETTY_INDENT(context)) + { + appendStringInfoChar(buf, ' '); + context->indentLevel += PRETTYINDENT_STD; + } + + /* if it's a shard, do differently */ + if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) + { + char *fragmentSchemaName = NULL; + char *fragmentTableName = NULL; + + ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); + + /* use schema and table name from the remote alias */ + appendStringInfo(buf, "MERGE INTO %s%s", + only_marker(rte), + generate_fragment_name(fragmentSchemaName, fragmentTableName)); + + if(rte->eref != NULL) + appendStringInfo(buf, " %s", + quote_identifier(get_rtable_name(query->resultRelation, context))); + } + else + { + appendStringInfo(buf, "MERGE INTO %s%s", + only_marker(rte), + generate_relation_or_shard_name(rte->relid, + 
context->distrelid, + context->shardid, NIL)); + + if (rte->alias != NULL) + appendStringInfo(buf, " %s", + quote_identifier(get_rtable_name(query->resultRelation, context))); + } + + /* Print the source relation and join clause */ + get_from_clause(query, " USING ", context); + appendContextKeyword(context, " ON ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); + get_rule_expr(query->mergeJoinCondition, context, false); + + /* + * Test for any NOT MATCHED BY SOURCE actions. If there are none, then + * any NOT MATCHED BY TARGET actions are output as "WHEN NOT MATCHED", per + * SQL standard. Otherwise, we have a non-SQL-standard query, so output + * "BY SOURCE" / "BY TARGET" qualifiers for all NOT MATCHED actions, to be + * more explicit. + */ + haveNotMatchedBySource = false; + foreach(lc, query->mergeActionList) + { + MergeAction *action = lfirst_node(MergeAction, lc); + + if (action->matchKind == MERGE_WHEN_NOT_MATCHED_BY_SOURCE) + { + haveNotMatchedBySource = true; + break; + } + } + + /* Print each merge action */ + foreach(lc, query->mergeActionList) + { + MergeAction *action = lfirst_node(MergeAction, lc); + + appendContextKeyword(context, " WHEN ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); + switch (action->matchKind) + { + case MERGE_WHEN_MATCHED: + appendStringInfoString(buf, "MATCHED"); + break; + case MERGE_WHEN_NOT_MATCHED_BY_SOURCE: + appendStringInfoString(buf, "NOT MATCHED BY SOURCE"); + break; + case MERGE_WHEN_NOT_MATCHED_BY_TARGET: + if (haveNotMatchedBySource) + appendStringInfoString(buf, "NOT MATCHED BY TARGET"); + else + appendStringInfoString(buf, "NOT MATCHED"); + break; + default: + elog(ERROR, "unrecognized matchKind: %d", + (int) action->matchKind); + } + + if (action->qual) + { + appendContextKeyword(context, " AND ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 3); + get_rule_expr(action->qual, context, false); + } + appendContextKeyword(context, " THEN ", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 3); + + if (action->commandType == CMD_INSERT) + { + /* This generally matches get_insert_query_def() */ + List *strippedexprs = NIL; + const char *sep = ""; + ListCell *lc2; + + appendStringInfoString(buf, "INSERT"); + + if (action->targetList) + appendStringInfoString(buf, " ("); + foreach(lc2, action->targetList) + { + TargetEntry *tle = (TargetEntry *) lfirst(lc2); + + Assert(!tle->resjunk); + + appendStringInfoString(buf, sep); + sep = ", "; + + appendStringInfoString(buf, + quote_identifier(get_attname(rte->relid, + tle->resno, + false))); + strippedexprs = lappend(strippedexprs, + processIndirection((Node *) tle->expr, + context)); + } + if (action->targetList) + appendStringInfoChar(buf, ')'); + + if (action->override) + { + if (action->override == OVERRIDING_SYSTEM_VALUE) + appendStringInfoString(buf, " OVERRIDING SYSTEM VALUE"); + else if (action->override == OVERRIDING_USER_VALUE) + appendStringInfoString(buf, " OVERRIDING USER VALUE"); + } + + if (strippedexprs) + { + appendContextKeyword(context, " VALUES (", + -PRETTYINDENT_STD, PRETTYINDENT_STD, 4); + get_rule_list_toplevel(strippedexprs, context, false); + appendStringInfoChar(buf, ')'); + } + else + appendStringInfoString(buf, " DEFAULT VALUES"); + } + else if (action->commandType == CMD_UPDATE) + { + appendStringInfoString(buf, "UPDATE SET "); + get_update_query_targetlist_def(query, action->targetList, + context, rte); + } + else if (action->commandType == CMD_DELETE) + appendStringInfoString(buf, "DELETE"); + else if (action->commandType == CMD_NOTHING) + appendStringInfoString(buf, "DO NOTHING"); + } + + /* 
Add RETURNING if present */
+	if (query->returningList)
+	{
+		appendContextKeyword(context, " RETURNING",
+							 -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+		get_target_list(query->returningList, context, NULL, colNamesVisible);
+	}
+
+	ereport(DEBUG1, (errmsg("<Deparsed MERGE query: %s>", buf->data)));
+}
+
+
+/* ----------
+ * get_utility_query_def - Parse back a UTILITY parsetree
+ * ----------
+ */
+static void
+get_utility_query_def(Query *query, deparse_context *context)
+{
+	StringInfo buf = context->buf;
+
+	if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt))
+	{
+		NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt;
+
+		appendContextKeyword(context, "",
+							 0, PRETTYINDENT_STD, 1);
+		appendStringInfo(buf, "NOTIFY %s",
+						 quote_identifier(stmt->conditionname));
+		if (stmt->payload)
+		{
+			appendStringInfoString(buf, ", ");
+			simple_quote_literal(buf, stmt->payload);
+		}
+	}
+	else if (query->utilityStmt && IsA(query->utilityStmt, TruncateStmt))
+	{
+		TruncateStmt *stmt = (TruncateStmt *) query->utilityStmt;
+		List *relationList = stmt->relations;
+		ListCell *relationCell = NULL;
+
+		appendContextKeyword(context, "",
+							 0, PRETTYINDENT_STD, 1);
+
+		appendStringInfo(buf, "TRUNCATE TABLE");
+
+		foreach(relationCell, relationList)
+		{
+			RangeVar *relationVar = (RangeVar *) lfirst(relationCell);
+			Oid relationId = RangeVarGetRelid(relationVar, NoLock, false);
+			char *relationName = generate_relation_or_shard_name(relationId,
+																 context->distrelid,
+																 context->shardid, NIL);
+			appendStringInfo(buf, " %s", relationName);
+
+			if (lnext(relationList, relationCell) != NULL)
+			{
+				appendStringInfo(buf, ",");
+			}
+		}
+
+		if (stmt->restart_seqs)
+		{
+			appendStringInfo(buf, " RESTART IDENTITY");
+		}
+
+		if (stmt->behavior == DROP_CASCADE)
+		{
+			appendStringInfo(buf, " CASCADE");
+		}
+	}
+	else
+	{
+		/* Currently only NOTIFY utility commands can appear in rules */
+		elog(ERROR, "unexpected utility statement type");
+	}
+}
+
+/*
+ * Display a Var appropriately.
+ *
+ * In some cases (currently only when recursing into an unnamed join)
+ * the Var's varlevelsup has to be interpreted with respect to a context
+ * above the current one; levelsup indicates the offset.
+ *
+ * If istoplevel is true, the Var is at the top level of a SELECT's
+ * targetlist, which means we need special treatment of whole-row Vars.
+ * Instead of the normal "tab.*", we'll print "tab.*::typename", which is a
+ * dirty hack to prevent "tab.*" from being expanded into multiple columns.
+ * (The parser will strip the useless coercion, so no inefficiency is added in
+ * dump and reload.) We used to print just "tab" in such cases, but that is
+ * ambiguous and will yield the wrong result if "tab" is also a plain column
+ * name in the query.
+ *
+ * Returns the attname of the Var, or NULL if the Var has no attname (because
+ * it is a whole-row Var or a subplan output reference).
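+ *
+ * Citus-specific behavior: when the Var's RTE is a shard (CITUS_RTE_SHARD),
+ * the whole-row cast uses the shard's own relation name via
+ * generate_rte_shard_name(), so the output takes the form "t.*::<shard name>"
+ * rather than a cast to the distributed table's type.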
+ */ +static char * +get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) +{ + StringInfo buf = context->buf; + RangeTblEntry *rte; + AttrNumber attnum; + int varno; + AttrNumber varattno; + int netlevelsup; + deparse_namespace *dpns; + deparse_columns *colinfo; + char *refname; + char *attname; + + /* Find appropriate nesting depth */ + netlevelsup = var->varlevelsup + levelsup; + if (netlevelsup >= list_length(context->namespaces)) + elog(ERROR, "bogus varlevelsup: %d offset %d", + var->varlevelsup, levelsup); + dpns = (deparse_namespace *) list_nth(context->namespaces, + netlevelsup); + + varno = var->varno; + varattno = var->varattno; + + + if (var->varnosyn > 0 && var->varnosyn <= list_length(dpns->rtable) && dpns->plan == NULL) { + rte = rt_fetch(var->varnosyn, dpns->rtable); + + /* + * if the rte var->varnosyn points to is not a regular table and it is a join + * then the correct relname will be found with var->varnosyn and var->varattnosyn + */ + if (rte->rtekind == RTE_JOIN && rte->relid == 0 && var->varnosyn != var->varno) { + varno = var->varnosyn; + varattno = var->varattnosyn; + } + } + + /* + * Try to find the relevant RTE in this rtable. In a plan tree, it's + * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig + * down into the subplans, or INDEX_VAR, which is resolved similarly. Also + * find the aliases previously assigned for this RTE. + */ + if (varno >= 1 && varno <= list_length(dpns->rtable)) + { + + /* + * We might have been asked to map child Vars to some parent relation. + */ + if (context->appendparents && dpns->appendrels) + { + + int pvarno = varno; + AttrNumber pvarattno = varattno; + AppendRelInfo *appinfo = dpns->appendrels[pvarno]; + bool found = false; + + /* Only map up to inheritance parents, not UNION ALL appendrels */ + while (appinfo && + rt_fetch(appinfo->parent_relid, + dpns->rtable)->rtekind == RTE_RELATION) + { + found = false; + if (pvarattno > 0) /* system columns stay as-is */ + { + if (pvarattno > appinfo->num_child_cols) + break; /* safety check */ + pvarattno = appinfo->parent_colnos[pvarattno - 1]; + if (pvarattno == 0) + break; /* Var is local to child */ + } + + pvarno = appinfo->parent_relid; + found = true; + + /* If the parent is itself a child, continue up. */ + Assert(pvarno > 0 && pvarno <= list_length(dpns->rtable)); + appinfo = dpns->appendrels[pvarno]; + } + + /* + * If we found an ancestral rel, and that rel is included in + * appendparents, print that column not the original one. + */ + if (found && bms_is_member(pvarno, context->appendparents)) + { + varno = pvarno; + varattno = pvarattno; + } + } + + rte = rt_fetch(varno, dpns->rtable); + refname = (char *) list_nth(dpns->rtable_names, varno - 1); + colinfo = deparse_columns_fetch(varno, dpns); + attnum = varattno; + } + else + { + resolve_special_varno((Node *) var, context, get_special_variable, + NULL); + return NULL; + } + + /* + * The planner will sometimes emit Vars referencing resjunk elements of a + * subquery's target list (this is currently only possible if it chooses + * to generate a "physical tlist" for a SubqueryScan or CteScan node). + * Although we prefer to print subquery-referencing Vars using the + * subquery's alias, that's not possible for resjunk items since they have + * no alias. So in that case, drill down to the subplan and print the + * contents of the referenced tlist item. 
This works because in a plan + * tree, such Vars can only occur in a SubqueryScan or CteScan node, and + * we'll have set dpns->inner_plan to reference the child plan node. + */ + if ((rte->rtekind == RTE_SUBQUERY || rte->rtekind == RTE_CTE) && + attnum > list_length(rte->eref->colnames) && + dpns->inner_plan) + { + TargetEntry *tle; + deparse_namespace save_dpns; + + tle = get_tle_by_resno(dpns->inner_tlist, attnum); + if (!tle) + elog(ERROR, "invalid attnum %d for relation \"%s\"", + attnum, rte->eref->aliasname); + + Assert(netlevelsup == 0); + push_child_plan(dpns, dpns->inner_plan, &save_dpns); + + /* + * Force parentheses because our caller probably assumed a Var is a + * simple expression. + */ + if (!IsA(tle->expr, Var)) + appendStringInfoChar(buf, '('); + get_rule_expr((Node *) tle->expr, context, true); + if (!IsA(tle->expr, Var)) + appendStringInfoChar(buf, ')'); + + pop_child_plan(dpns, &save_dpns); + return NULL; + } + + /* + * If it's an unnamed join, look at the expansion of the alias variable. + * If it's a simple reference to one of the input vars, then recursively + * print the name of that var instead. When it's not a simple reference, + * we have to just print the unqualified join column name. (This can only + * happen with "dangerous" merged columns in a JOIN USING; we took pains + * previously to make the unqualified column name unique in such cases.) + * + * This wouldn't work in decompiling plan trees, because we don't store + * joinaliasvars lists after planning; but a plan tree should never + * contain a join alias variable. + */ + if (rte->rtekind == RTE_JOIN && rte->alias == NULL) + { + if (rte->joinaliasvars == NIL) + elog(ERROR, "cannot decompile join alias var in plan tree"); + if (attnum > 0) + { + Var *aliasvar; + + aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1); + /* we intentionally don't strip implicit coercions here */ + if (aliasvar && IsA(aliasvar, Var)) + { + return get_variable(aliasvar, var->varlevelsup + levelsup, + istoplevel, context); + } + } + + /* + * Unnamed join has no refname. (Note: since it's unnamed, there is + * no way the user could have referenced it to create a whole-row Var + * for it. So we don't have to cover that case below.) + */ + Assert(refname == NULL); + } + + if (attnum == InvalidAttrNumber) + attname = NULL; + else if (attnum > 0) + { + /* Get column name to use from the colinfo struct */ + if (attnum > colinfo->num_cols) + elog(ERROR, "invalid attnum %d for relation \"%s\"", + attnum, rte->eref->aliasname); + attname = colinfo->colnames[attnum - 1]; + + /* + * If we find a Var referencing a dropped column, it seems better to + * print something (anything) than to fail. In general this should + * not happen, but it used to be possible for some cases involving + * functions returning named composite types, and perhaps there are + * still bugs out there. 
+ */ + if (attname == NULL) + attname = "?dropped?column?"; + } + else if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) + { + /* System column on a Citus shard */ + attname = get_attname(rte->relid, attnum, false); + } + else + { + /* System column - name is fixed, get it from the catalog */ + attname = get_rte_attribute_name(rte, attnum); + } + + if (refname && (context->varprefix || attname == NULL)) + { + appendStringInfoString(buf, quote_identifier(refname)); + appendStringInfoChar(buf, '.'); + } + if (attname) + appendStringInfoString(buf, quote_identifier(attname)); + else + { + appendStringInfoChar(buf, '*'); + + if (istoplevel) + { + if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) + { + /* use rel.*::shard_name instead of rel.*::table_name */ + appendStringInfo(buf, "::%s", + generate_rte_shard_name(rte)); + } + else + { + appendStringInfo(buf, "::%s", + format_type_with_typemod(var->vartype, + var->vartypmod)); + } + } + } + + return attname; +} + +/* + * Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This + * routine is actually a callback for get_special_varno, which handles finding + * the correct TargetEntry. We get the expression contained in that + * TargetEntry and just need to deparse it, a job we can throw back on + * get_rule_expr. + */ +static void +get_special_variable(Node *node, deparse_context *context, void *callback_arg) +{ + StringInfo buf = context->buf; + + /* + * For a non-Var referent, force parentheses because our caller probably + * assumed a Var is a simple expression. + */ + if (!IsA(node, Var)) + appendStringInfoChar(buf, '('); + get_rule_expr(node, context, true); + if (!IsA(node, Var)) + appendStringInfoChar(buf, ')'); +} + +/* + * Chase through plan references to special varnos (OUTER_VAR, INNER_VAR, + * INDEX_VAR) until we find a real Var or some kind of non-Var node; then, + * invoke the callback provided. + */ +static void +resolve_special_varno(Node *node, deparse_context *context, rsv_callback callback, void *callback_arg) +{ + Var *var; + deparse_namespace *dpns; + + /* This function is recursive, so let's be paranoid. */ + check_stack_depth(); + + /* If it's not a Var, invoke the callback. */ + if (!IsA(node, Var)) + { + (*callback) (node, context, callback_arg); + return; + } + + /* Find appropriate nesting depth */ + var = (Var *) node; + dpns = (deparse_namespace *) list_nth(context->namespaces, + var->varlevelsup); + + /* + * It's a special RTE, so recurse. + */ + if (var->varno == OUTER_VAR && dpns->outer_tlist) + { + TargetEntry *tle; + deparse_namespace save_dpns; + Bitmapset *save_appendparents; + + tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); + if (!tle) + elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); + + /* If we're descending to the first child of an Append or MergeAppend, + * update appendparents. This will affect deparsing of all Vars + * appearing within the eventually-resolved subexpression. 
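+		 * (The previous appendparents set is saved and restored around the
+		 * recursion below, so the adjustment is scoped to this OUTER_VAR
+		 * reference only.)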
+ */ + save_appendparents = context->appendparents; + + if (IsA(dpns->plan, Append)) + context->appendparents = bms_union(context->appendparents, + ((Append *) dpns->plan)->apprelids); + else if (IsA(dpns->plan, MergeAppend)) + context->appendparents = bms_union(context->appendparents, + ((MergeAppend *) dpns->plan)->apprelids); + + push_child_plan(dpns, dpns->outer_plan, &save_dpns); + resolve_special_varno((Node *) tle->expr, context, + callback, callback_arg); + pop_child_plan(dpns, &save_dpns); + context->appendparents = save_appendparents; + return; + } + else if (var->varno == INNER_VAR && dpns->inner_tlist) + { + TargetEntry *tle; + deparse_namespace save_dpns; + + tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); + if (!tle) + elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); + + push_child_plan(dpns, dpns->inner_plan, &save_dpns); + resolve_special_varno((Node *) tle->expr, context, callback, callback_arg); + pop_child_plan(dpns, &save_dpns); + return; + } + else if (var->varno == INDEX_VAR && dpns->index_tlist) + { + TargetEntry *tle; + + tle = get_tle_by_resno(dpns->index_tlist, var->varattno); + if (!tle) + elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); + + resolve_special_varno((Node *) tle->expr, context, callback, callback_arg); + return; + } + else if (var->varno < 1 || var->varno > list_length(dpns->rtable)) + elog(ERROR, "bogus varno: %d", var->varno); + + /* Not special. Just invoke the callback. */ + (*callback) (node, context, callback_arg); +} + +/* + * Get the name of a field of an expression of composite type. The + * expression is usually a Var, but we handle other cases too. + * + * levelsup is an extra offset to interpret the Var's varlevelsup correctly. + * + * This is fairly straightforward when the expression has a named composite + * type; we need only look up the type in the catalogs. However, the type + * could also be RECORD. Since no actual table or view column is allowed to + * have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE + * or to a subquery output. We drill down to find the ultimate defining + * expression and attempt to infer the field name from it. We ereport if we + * can't determine the name. + * + * Similarly, a PARAM of type RECORD has to refer to some expression of + * a determinable composite type. + */ +static const char * +get_name_for_var_field(Var *var, int fieldno, + int levelsup, deparse_context *context) +{ + RangeTblEntry *rte; + AttrNumber attnum; + int netlevelsup; + deparse_namespace *dpns; + int varno; + AttrNumber varattno; + TupleDesc tupleDesc; + Node *expr; + + /* + * If it's a RowExpr that was expanded from a whole-row Var, use the + * column names attached to it. (We could let get_expr_result_tupdesc() + * handle this, but it's much cheaper to just pull out the name we need.) + */ + if (IsA(var, RowExpr)) + { + RowExpr *r = (RowExpr *) var; + + if (fieldno > 0 && fieldno <= list_length(r->colnames)) + return strVal(list_nth(r->colnames, fieldno - 1)); + } + + /* + * If it's a Param of type RECORD, try to find what the Param refers to. 
+ */ + if (IsA(var, Param)) + { + Param *param = (Param *) var; + ListCell *ancestor_cell; + + expr = find_param_referent(param, context, &dpns, &ancestor_cell); + if (expr) + { + /* Found a match, so recurse to decipher the field name */ + deparse_namespace save_dpns; + const char *result; + + push_ancestor_plan(dpns, ancestor_cell, &save_dpns); + result = get_name_for_var_field((Var *) expr, fieldno, + 0, context); + pop_ancestor_plan(dpns, &save_dpns); + return result; + } + } + + /* + * If it's a Var of type RECORD, we have to find what the Var refers to; + * if not, we can use get_expr_result_tupdesc(). + */ + if (!IsA(var, Var) || + var->vartype != RECORDOID) + { + tupleDesc = get_expr_result_tupdesc((Node *) var, false); + /* Got the tupdesc, so we can extract the field name */ + Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); + return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname); + } + + /* Find appropriate nesting depth */ + netlevelsup = var->varlevelsup + levelsup; + if (netlevelsup >= list_length(context->namespaces)) + elog(ERROR, "bogus varlevelsup: %d offset %d", + var->varlevelsup, levelsup); + dpns = (deparse_namespace *) list_nth(context->namespaces, + netlevelsup); + + varno = var->varno; + varattno = var->varattno; + + if (var->varnosyn > 0 && var->varnosyn <= list_length(dpns->rtable) && dpns->plan == NULL) { + rte = rt_fetch(var->varnosyn, dpns->rtable); + + /* + * if the rte var->varnosyn points to is not a regular table and it is a join + * then the correct relname will be found with var->varnosyn and var->varattnosyn + */ + if (rte->rtekind == RTE_JOIN && rte->relid == 0 && var->varnosyn != var->varno) { + varno = var->varnosyn; + varattno = var->varattnosyn; + } + } + + /* + * Try to find the relevant RTE in this rtable. In a plan tree, it's + * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig + * down into the subplans, or INDEX_VAR, which is resolved similarly. 
+ */ + if (varno >= 1 && varno <= list_length(dpns->rtable)) + { + rte = rt_fetch(varno, dpns->rtable); + attnum = varattno; + } + else if (varno == OUTER_VAR && dpns->outer_tlist) + { + TargetEntry *tle; + deparse_namespace save_dpns; + const char *result; + + tle = get_tle_by_resno(dpns->outer_tlist, varattno); + if (!tle) + elog(ERROR, "bogus varattno for OUTER_VAR var: %d", varattno); + + Assert(netlevelsup == 0); + push_child_plan(dpns, dpns->outer_plan, &save_dpns); + + result = get_name_for_var_field((Var *) tle->expr, fieldno, + levelsup, context); + + pop_child_plan(dpns, &save_dpns); + return result; + } + else if (varno == INNER_VAR && dpns->inner_tlist) + { + TargetEntry *tle; + deparse_namespace save_dpns; + const char *result; + + tle = get_tle_by_resno(dpns->inner_tlist, varattno); + if (!tle) + elog(ERROR, "bogus varattno for INNER_VAR var: %d", varattno); + + Assert(netlevelsup == 0); + push_child_plan(dpns, dpns->inner_plan, &save_dpns); + + result = get_name_for_var_field((Var *) tle->expr, fieldno, + levelsup, context); + + pop_child_plan(dpns, &save_dpns); + return result; + } + else if (varno == INDEX_VAR && dpns->index_tlist) + { + TargetEntry *tle; + const char *result; + + tle = get_tle_by_resno(dpns->index_tlist, varattno); + if (!tle) + elog(ERROR, "bogus varattno for INDEX_VAR var: %d", varattno); + + Assert(netlevelsup == 0); + + result = get_name_for_var_field((Var *) tle->expr, fieldno, + levelsup, context); + + return result; + } + else + { + elog(ERROR, "bogus varno: %d", varno); + return NULL; /* keep compiler quiet */ + } + + if (attnum == InvalidAttrNumber) + { + /* Var is whole-row reference to RTE, so select the right field */ + return get_rte_attribute_name(rte, fieldno); + } + + /* + * This part has essentially the same logic as the parser's + * expandRecordVariable() function, but we are dealing with a different + * representation of the input context, and we only need one field name + * not a TupleDesc. Also, we need special cases for finding subquery and + * CTE subplans when deparsing Plan trees. + */ + expr = (Node *) var; /* default if we can't drill down */ + + switch (rte->rtekind) + { + case RTE_RELATION: + case RTE_VALUES: + case RTE_NAMEDTUPLESTORE: + case RTE_RESULT: + + /* + * This case should not occur: a column of a table or values list + * shouldn't have type RECORD. Fall through and fail (most + * likely) at the bottom. + */ + break; + case RTE_SUBQUERY: + /* Subselect-in-FROM: examine sub-select's output expr */ + { + if (rte->subquery) + { + TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList, + attnum); + + if (ste == NULL || ste->resjunk) + elog(ERROR, "subquery %s does not have attribute %d", + rte->eref->aliasname, attnum); + expr = (Node *) ste->expr; + if (IsA(expr, Var)) + { + /* + * Recurse into the sub-select to see what its Var + * refers to. We have to build an additional level of + * namespace to keep in step with varlevelsup in the + * subselect; furthermore, the subquery RTE might be + * from an outer query level, in which case the + * namespace for the subselect must have that outer + * level as parent namespace. 
+ */ + List *save_nslist = context->namespaces; + List *parent_namespaces; + deparse_namespace mydpns; + const char *result; + + parent_namespaces = list_copy_tail(context->namespaces, + netlevelsup); + + set_deparse_for_query(&mydpns, rte->subquery, + parent_namespaces); + + context->namespaces = lcons(&mydpns, + parent_namespaces); + + result = get_name_for_var_field((Var *) expr, fieldno, + 0, context); + + context->namespaces = save_nslist; + + return result; + } + /* else fall through to inspect the expression */ + } + else + { + /* + * We're deparsing a Plan tree so we don't have complete + * RTE entries (in particular, rte->subquery is NULL). But + * the only place we'd see a Var directly referencing a + * SUBQUERY RTE is in a SubqueryScan plan node, and we can + * look into the child plan's tlist instead. + */ + TargetEntry *tle; + deparse_namespace save_dpns; + const char *result; + + if (!dpns->inner_plan) + elog(ERROR, "failed to find plan for subquery %s", + rte->eref->aliasname); + tle = get_tle_by_resno(dpns->inner_tlist, attnum); + if (!tle) + elog(ERROR, "bogus varattno for subquery var: %d", + attnum); + Assert(netlevelsup == 0); + push_child_plan(dpns, dpns->inner_plan, &save_dpns); + + result = get_name_for_var_field((Var *) tle->expr, fieldno, + levelsup, context); + + pop_child_plan(dpns, &save_dpns); + return result; + } + } + break; + case RTE_JOIN: + /* Join RTE --- recursively inspect the alias variable */ + if (rte->joinaliasvars == NIL) + elog(ERROR, "cannot decompile join alias var in plan tree"); + Assert(attnum > 0 && attnum <= list_length(rte->joinaliasvars)); + expr = (Node *) list_nth(rte->joinaliasvars, attnum - 1); + Assert(expr != NULL); + /* we intentionally don't strip implicit coercions here */ + if (IsA(expr, Var)) + return get_name_for_var_field((Var *) expr, fieldno, + var->varlevelsup + levelsup, + context); + /* else fall through to inspect the expression */ + break; + case RTE_FUNCTION: + case RTE_TABLEFUNC: + + /* + * We couldn't get here unless a function is declared with one of + * its result columns as RECORD, which is not allowed. + */ + break; + case RTE_CTE: + /* CTE reference: examine subquery's output expr */ + { + CommonTableExpr *cte = NULL; + Index ctelevelsup; + ListCell *lc; + + /* + * Try to find the referenced CTE using the namespace stack. + */ + ctelevelsup = rte->ctelevelsup + netlevelsup; + if (ctelevelsup >= list_length(context->namespaces)) + lc = NULL; + else + { + deparse_namespace *ctedpns; + + ctedpns = (deparse_namespace *) + list_nth(context->namespaces, ctelevelsup); + foreach(lc, ctedpns->ctes) + { + cte = (CommonTableExpr *) lfirst(lc); + if (strcmp(cte->ctename, rte->ctename) == 0) + break; + } + } + if (lc != NULL) + { + Query *ctequery = (Query *) cte->ctequery; + TargetEntry *ste = get_tle_by_resno(GetCTETargetList(cte), + attnum); + + if (ste == NULL || ste->resjunk) + elog(ERROR, "CTE %s does not have attribute %d", + rte->eref->aliasname, attnum); + expr = (Node *) ste->expr; + if (IsA(expr, Var)) + { + /* + * Recurse into the CTE to see what its Var refers to. + * We have to build an additional level of namespace + * to keep in step with varlevelsup in the CTE; + * furthermore it could be an outer CTE (compare + * SUBQUERY case above). 
+ */ + List *save_nslist = context->namespaces; + List *parent_namespaces; + deparse_namespace mydpns; + const char *result; + + parent_namespaces = list_copy_tail(context->namespaces, + ctelevelsup); + + set_deparse_for_query(&mydpns, ctequery, + parent_namespaces); + + context->namespaces = lcons(&mydpns, parent_namespaces); + + result = get_name_for_var_field((Var *) expr, fieldno, + 0, context); + + context->namespaces = save_nslist; + + return result; + } + /* else fall through to inspect the expression */ + } + else + { + /* + * We're deparsing a Plan tree so we don't have a CTE + * list. But the only places we'd see a Var directly + * referencing a CTE RTE are in CteScan or WorkTableScan + * plan nodes. For those cases, set_deparse_plan arranged + * for dpns->inner_plan to be the plan node that emits the + * CTE or RecursiveUnion result, and we can look at its + * tlist instead. + */ + TargetEntry *tle; + deparse_namespace save_dpns; + const char *result; + + if (!dpns->inner_plan) + elog(ERROR, "failed to find plan for CTE %s", + rte->eref->aliasname); + tle = get_tle_by_resno(dpns->inner_tlist, attnum); + if (!tle) + elog(ERROR, "bogus varattno for subquery var: %d", + attnum); + Assert(netlevelsup == 0); + push_child_plan(dpns, dpns->inner_plan, &save_dpns); + + result = get_name_for_var_field((Var *) tle->expr, fieldno, + levelsup, context); + + pop_child_plan(dpns, &save_dpns); + return result; + } + } + break; + } + + /* + * We now have an expression we can't expand any more, so see if + * get_expr_result_tupdesc() can do anything with it. + */ + tupleDesc = get_expr_result_tupdesc(expr, false); + /* Got the tupdesc, so we can extract the field name */ + Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); + return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname); +} + +/* + * Try to find the referenced expression for a PARAM_EXEC Param that might + * reference a parameter supplied by an upper NestLoop or SubPlan plan node. + * + * If successful, return the expression and set *dpns_p and *ancestor_cell_p + * appropriately for calling push_ancestor_plan(). If no referent can be + * found, return NULL. + */ +static Node * +find_param_referent(Param *param, deparse_context *context, + deparse_namespace **dpns_p, ListCell **ancestor_cell_p) +{ + /* Initialize output parameters to prevent compiler warnings */ + *dpns_p = NULL; + *ancestor_cell_p = NULL; + + /* + * If it's a PARAM_EXEC parameter, look for a matching NestLoopParam or + * SubPlan argument. This will necessarily be in some ancestor of the + * current expression's Plan. + */ + if (param->paramkind == PARAM_EXEC) + { + deparse_namespace *dpns; + Plan *child_plan; + ListCell *lc; + + dpns = (deparse_namespace *) linitial(context->namespaces); + child_plan = dpns->plan; + + foreach(lc, dpns->ancestors) + { + Node *ancestor = (Node *) lfirst(lc); + ListCell *lc2; + + /* + * NestLoops transmit params to their inner child only. + */ + if (IsA(ancestor, NestLoop) && + child_plan == innerPlan(ancestor)) + { + NestLoop *nl = (NestLoop *) ancestor; + + foreach(lc2, nl->nestParams) + { + NestLoopParam *nlp = (NestLoopParam *) lfirst(lc2); + + if (nlp->paramno == param->paramid) + { + /* Found a match, so return it */ + *dpns_p = dpns; + *ancestor_cell_p = lc; + return (Node *) nlp->paramval; + } + } + } + + /* + * Check to see if we're crawling up from a subplan. 
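+			 * (SubPlan nodes can appear in dpns->ancestors because
+			 * get_rule_expr() pushes a SubPlan onto that list while deparsing
+			 * its testexpr; a SubPlan is not itself a Plan node.)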
+ */ + if(IsA(ancestor, SubPlan)) + { + SubPlan *subplan = (SubPlan *) ancestor; + ListCell *lc3; + ListCell *lc4; + + /* Matched subplan, so check its arguments */ + forboth(lc3, subplan->parParam, lc4, subplan->args) + { + int paramid = lfirst_int(lc3); + Node *arg = (Node *) lfirst(lc4); + + if (paramid == param->paramid) + { + /* + * Found a match, so return it. But, since Vars in + * the arg are to be evaluated in the surrounding + * context, we have to point to the next ancestor item + * that is *not* a SubPlan. + */ + ListCell *rest; + + for_each_cell(rest, dpns->ancestors, + lnext(dpns->ancestors, lc)) + { + Node *ancestor2 = (Node *) lfirst(rest); + + if (!IsA(ancestor2, SubPlan)) + { + *dpns_p = dpns; + *ancestor_cell_p = rest; + return arg; + } + } + elog(ERROR, "SubPlan cannot be outermost ancestor"); + } + } + + /* SubPlan isn't a kind of Plan, so skip the rest */ + continue; + } + + /* + * We need not consider the ancestor's initPlan list, since + * initplans never have any parParams. + */ + + /* No luck, crawl up to next ancestor */ + child_plan = (Plan *) ancestor; + } + } + + /* No referent found */ + return NULL; +} + +/* + * Try to find a subplan/initplan that emits the value for a PARAM_EXEC Param. + * + * If successful, return the generating subplan/initplan and set *column_p + * to the subplan's 0-based output column number. + * Otherwise, return NULL. + */ +static SubPlan * +find_param_generator(Param *param, deparse_context *context, int *column_p) +{ + /* Initialize output parameter to prevent compiler warnings */ + *column_p = 0; + + /* + * If it's a PARAM_EXEC parameter, search the current plan node as well as + * ancestor nodes looking for a subplan or initplan that emits the value + * for the Param. It could appear in the setParams of an initplan or + * MULTIEXPR_SUBLINK subplan, or in the paramIds of an ancestral SubPlan. + */ + if (param->paramkind == PARAM_EXEC) + { + SubPlan *result; + deparse_namespace *dpns; + ListCell *lc; + + dpns = (deparse_namespace *) linitial(context->namespaces); + + /* First check the innermost plan node's initplans */ + result = find_param_generator_initplan(param, dpns->plan, column_p); + if (result) + return result; + + /* + * The plan's targetlist might contain MULTIEXPR_SUBLINK SubPlans, + * which can be referenced by Params elsewhere in the targetlist. + * (Such Params should always be in the same targetlist, so there's no + * need to do this work at upper plan nodes.) + */ + foreach_node(TargetEntry, tle, dpns->plan->targetlist) + { + if (tle->expr && IsA(tle->expr, SubPlan)) + { + SubPlan *subplan = (SubPlan *) tle->expr; + + if (subplan->subLinkType == MULTIEXPR_SUBLINK) + { + foreach_int(paramid, subplan->setParam) + { + if (paramid == param->paramid) + { + /* Found a match, so return it. */ + *column_p = foreach_current_index(paramid); + return subplan; + } + } + } + } + } + + /* No luck, so check the ancestor nodes */ + foreach(lc, dpns->ancestors) + { + Node *ancestor = (Node *) lfirst(lc); + + /* + * If ancestor is a SubPlan, check the paramIds it provides. + */ + if (IsA(ancestor, SubPlan)) + { + SubPlan *subplan = (SubPlan *) ancestor; + + foreach_int(paramid, subplan->paramIds) + { + if (paramid == param->paramid) + { + /* Found a match, so return it. */ + *column_p = foreach_current_index(paramid); + return subplan; + } + } + + /* SubPlan isn't a kind of Plan, so skip the rest */ + continue; + } + + /* + * Otherwise, it's some kind of Plan node, so check its initplans. 
+ */ + result = find_param_generator_initplan(param, (Plan *) ancestor, + column_p); + if (result) + return result; + + /* No luck, crawl up to next ancestor */ + } + } + + /* No generator found */ + return NULL; +} + +/* + * Subroutine for find_param_generator: search one Plan node's initplans + */ +static SubPlan * +find_param_generator_initplan(Param *param, Plan *plan, int *column_p) +{ + foreach_node(SubPlan, subplan, plan->initPlan) + { + foreach_int(paramid, subplan->setParam) + { + if (paramid == param->paramid) + { + /* Found a match, so return it. */ + *column_p = foreach_current_index(paramid); + return subplan; + } + } + } + return NULL; +} + +/* + * Display a Param appropriately. + */ +static void +get_parameter(Param *param, deparse_context *context) +{ + Node *expr; + deparse_namespace *dpns; + ListCell *ancestor_cell; + SubPlan *subplan; + int column; + + /* + * If it's a PARAM_EXEC parameter, try to locate the expression from which + * the parameter was computed. This stanza handles only cases in which + * the Param represents an input to the subplan we are currently in. + */ + expr = find_param_referent(param, context, &dpns, &ancestor_cell); + if (expr) + { + /* Found a match, so print it */ + deparse_namespace save_dpns; + bool save_varprefix; + bool need_paren; + + /* Switch attention to the ancestor plan node */ + push_ancestor_plan(dpns, ancestor_cell, &save_dpns); + + /* + * Force prefixing of Vars, since they won't belong to the relation + * being scanned in the original plan node. + */ + save_varprefix = context->varprefix; + context->varprefix = true; + + /* + * A Param's expansion is typically a Var, Aggref, GroupingFunc, or + * upper-level Param, which wouldn't need extra parentheses. + * Otherwise, insert parens to ensure the expression looks atomic. + */ + need_paren = !(IsA(expr, Var) || + IsA(expr, Aggref) || + IsA(expr, GroupingFunc) || + IsA(expr, Param)); + if (need_paren) + appendStringInfoChar(context->buf, '('); + + get_rule_expr(expr, context, false); + + if (need_paren) + appendStringInfoChar(context->buf, ')'); + + context->varprefix = save_varprefix; + + pop_ancestor_plan(dpns, &save_dpns); + + return; + } + + /* + * Alternatively, maybe it's a subplan output, which we print as a + * reference to the subplan. (We could drill down into the subplan and + * print the relevant targetlist expression, but that has been deemed too + * confusing since it would violate normal SQL scope rules. Also, we're + * relying on this reference to show that the testexpr containing the + * Param has anything to do with that subplan at all.) + */ + subplan = find_param_generator(param, context, &column); + if (subplan) + { + appendStringInfo(context->buf, "(%s%s).col%d", + subplan->useHashTable ? "hashed " : "", + subplan->plan_name, column + 1); + + return; + } + + /* + * If it's an external parameter, see if the outermost namespace provides + * function argument names. + */ + if (param->paramkind == PARAM_EXTERN && context->namespaces != NIL) + { + dpns = llast(context->namespaces); + if (dpns->argnames && + param->paramid > 0 && + param->paramid <= dpns->numargs) + { + char *argname = dpns->argnames[param->paramid - 1]; + + if (argname) + { + bool should_qualify = false; + ListCell *lc; + + /* + * Qualify the parameter name if there are any other deparse + * namespaces with range tables. This avoids qualifying in + * trivial cases like "RETURN a + b", but makes it safe in all + * other cases. 
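+				 * (For example, a PARAM_EXTERN reference to an argument named
+				 * "val" of a function "my_func" would then print as
+				 * "my_func.val" rather than bare "val"; these names are
+				 * purely illustrative.)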
+ */ + foreach(lc, context->namespaces) + { + deparse_namespace *depns = lfirst(lc); + + if (depns->rtable_names != NIL) + { + should_qualify = true; + break; + } + } + if (should_qualify) + { + appendStringInfoString(context->buf, quote_identifier(dpns->funcname)); + appendStringInfoChar(context->buf, '.'); + } + + appendStringInfoString(context->buf, quote_identifier(argname)); + return; + } + } + } + + /* + * Not PARAM_EXEC, or couldn't find referent: for base types just print $N. + * For composite types, add cast to the parameter to ease remote node detect + * the type. + * + * It's a bug if we get here for anything except PARAM_EXTERN Params, but + * in production builds printing $N seems more useful than failing. + */ + Assert(param->paramkind == PARAM_EXTERN); + + if (param->paramtype >= FirstNormalObjectId) + { + char *typeName = format_type_with_typemod(param->paramtype, param->paramtypmod); + + appendStringInfo(context->buf, "$%d::%s", param->paramid, typeName); + } + else + { + appendStringInfo(context->buf, "$%d", param->paramid); + } +} + +/* + * get_simple_binary_op_name + * + * helper function for isSimpleNode + * will return single char binary operator name, or NULL if it's not + */ +static const char * +get_simple_binary_op_name(OpExpr *expr) +{ + List *args = expr->args; + + if (list_length(args) == 2) + { + /* binary operator */ + Node *arg1 = (Node *) linitial(args); + Node *arg2 = (Node *) lsecond(args); + const char *op; + + op = generate_operator_name(expr->opno, exprType(arg1), exprType(arg2)); + if (strlen(op) == 1) + return op; + } + return NULL; +} + +/* + * isSimpleNode - check if given node is simple (doesn't need parenthesizing) + * + * true : simple in the context of parent node's type + * false : not simple + */ +static bool +isSimpleNode(Node *node, Node *parentNode, int prettyFlags) +{ + if (!node) + return false; + + switch (nodeTag(node)) + { + case T_Var: + case T_Const: + case T_Param: + case T_CoerceToDomainValue: + case T_SetToDefault: + case T_CurrentOfExpr: + /* single words: always simple */ + return true; + + case T_SubscriptingRef: + case T_ArrayExpr: + case T_RowExpr: + case T_CoalesceExpr: + case T_MinMaxExpr: + case T_SQLValueFunction: + case T_XmlExpr: + case T_NextValueExpr: + case T_NullIfExpr: + case T_Aggref: + case T_GroupingFunc: + case T_WindowFunc: + case T_MergeSupportFunc: + case T_FuncExpr: + case T_JsonConstructorExpr: + case T_JsonExpr: + /* function-like: name(..) or name[..] */ + return true; + + /* CASE keywords act as parentheses */ + case T_CaseExpr: + return true; + + case T_FieldSelect: + + /* + * appears simple since . has top precedence, unless parent is + * T_FieldSelect itself! 
+ */ + return !IsA(parentNode, FieldSelect); + + case T_FieldStore: + + /* + * treat like FieldSelect (probably doesn't matter) + */ + return !IsA(parentNode, FieldStore); + + case T_CoerceToDomain: + /* maybe simple, check args */ + return isSimpleNode((Node *) ((CoerceToDomain *) node)->arg, + node, prettyFlags); + case T_RelabelType: + return isSimpleNode((Node *) ((RelabelType *) node)->arg, + node, prettyFlags); + case T_CoerceViaIO: + return isSimpleNode((Node *) ((CoerceViaIO *) node)->arg, + node, prettyFlags); + case T_ArrayCoerceExpr: + return isSimpleNode((Node *) ((ArrayCoerceExpr *) node)->arg, + node, prettyFlags); + case T_ConvertRowtypeExpr: + return isSimpleNode((Node *) ((ConvertRowtypeExpr *) node)->arg, + node, prettyFlags); + + case T_OpExpr: + { + /* depends on parent node type; needs further checking */ + if (prettyFlags & PRETTYFLAG_PAREN && IsA(parentNode, OpExpr)) + { + const char *op; + const char *parentOp; + bool is_lopriop; + bool is_hipriop; + bool is_lopriparent; + bool is_hipriparent; + + op = get_simple_binary_op_name((OpExpr *) node); + if (!op) + return false; + + /* We know only the basic operators + - and * / % */ + is_lopriop = (strchr("+-", *op) != NULL); + is_hipriop = (strchr("*/%", *op) != NULL); + if (!(is_lopriop || is_hipriop)) + return false; + + parentOp = get_simple_binary_op_name((OpExpr *) parentNode); + if (!parentOp) + return false; + + is_lopriparent = (strchr("+-", *parentOp) != NULL); + is_hipriparent = (strchr("*/%", *parentOp) != NULL); + if (!(is_lopriparent || is_hipriparent)) + return false; + + if (is_hipriop && is_lopriparent) + return true; /* op binds tighter than parent */ + + if (is_lopriop && is_hipriparent) + return false; + + /* + * Operators are same priority --- can skip parens only if + * we have (a - b) - c, not a - (b - c). + */ + if (node == (Node *) linitial(((OpExpr *) parentNode)->args)) + return true; + + return false; + } + /* else do the same stuff as for T_SubLink et al. 
*/ + } + /* FALLTHROUGH */ + + case T_SubLink: + case T_NullTest: + case T_BooleanTest: + case T_DistinctExpr: + case T_JsonIsPredicate: + switch (nodeTag(parentNode)) + { + case T_FuncExpr: + { + /* special handling for casts and COERCE_SQL_SYNTAX */ + CoercionForm type = ((FuncExpr *) parentNode)->funcformat; + + if (type == COERCE_EXPLICIT_CAST || + type == COERCE_IMPLICIT_CAST || + type == COERCE_SQL_SYNTAX) + return false; + return true; /* own parentheses */ + } + case T_BoolExpr: /* lower precedence */ + case T_SubscriptingRef: /* other separators */ + case T_ArrayExpr: /* other separators */ + case T_RowExpr: /* other separators */ + case T_CoalesceExpr: /* own parentheses */ + case T_MinMaxExpr: /* own parentheses */ + case T_XmlExpr: /* own parentheses */ + case T_NullIfExpr: /* other separators */ + case T_Aggref: /* own parentheses */ + case T_GroupingFunc: /* own parentheses */ + case T_WindowFunc: /* own parentheses */ + case T_CaseExpr: /* other separators */ + return true; + default: + return false; + } + + case T_BoolExpr: + switch (nodeTag(parentNode)) + { + case T_BoolExpr: + if (prettyFlags & PRETTYFLAG_PAREN) + { + BoolExprType type; + BoolExprType parentType; + + type = ((BoolExpr *) node)->boolop; + parentType = ((BoolExpr *) parentNode)->boolop; + switch (type) + { + case NOT_EXPR: + case AND_EXPR: + if (parentType == AND_EXPR || parentType == OR_EXPR) + return true; + break; + case OR_EXPR: + if (parentType == OR_EXPR) + return true; + break; + } + } + return false; + case T_FuncExpr: + { + /* special handling for casts and COERCE_SQL_SYNTAX */ + CoercionForm type = ((FuncExpr *) parentNode)->funcformat; + + if (type == COERCE_EXPLICIT_CAST || + type == COERCE_IMPLICIT_CAST || + type == COERCE_SQL_SYNTAX) + return false; + return true; /* own parentheses */ + } + case T_SubscriptingRef: /* other separators */ + case T_ArrayExpr: /* other separators */ + case T_RowExpr: /* other separators */ + case T_CoalesceExpr: /* own parentheses */ + case T_MinMaxExpr: /* own parentheses */ + case T_XmlExpr: /* own parentheses */ + case T_NullIfExpr: /* other separators */ + case T_Aggref: /* own parentheses */ + case T_GroupingFunc: /* own parentheses */ + case T_WindowFunc: /* own parentheses */ + case T_CaseExpr: /* other separators */ + case T_JsonExpr: /* own parentheses */ + return true; + default: + return false; + } + + case T_JsonValueExpr: + /* maybe simple, check args */ + return isSimpleNode((Node *) ((JsonValueExpr *) node)->raw_expr, + node, prettyFlags); + + default: + break; + } + /* those we don't know: in dubio complexo */ + return false; +} + +/* + * appendContextKeyword - append a keyword to buffer + * + * If prettyPrint is enabled, perform a line break, and adjust indentation. + * Otherwise, just append the keyword. + */ +static void +appendContextKeyword(deparse_context *context, const char *str, + int indentBefore, int indentAfter, int indentPlus) +{ + StringInfo buf = context->buf; + + if (PRETTY_INDENT(context)) + { + int indentAmount; + + context->indentLevel += indentBefore; + + /* remove any trailing spaces currently in the buffer ... */ + removeStringInfoSpaces(buf); + /* ... then add a newline and some spaces */ + appendStringInfoChar(buf, '\n'); + + if (context->indentLevel < PRETTYINDENT_LIMIT) + indentAmount = Max(context->indentLevel, 0) + indentPlus; + else + { + /* + * If we're indented more than PRETTYINDENT_LIMIT characters, try + * to conserve horizontal space by reducing the per-level + * indentation. 
For best results the scale factor here should + * divide all the indent amounts that get added to indentLevel + * (PRETTYINDENT_STD, etc). It's important that the indentation + * not grow unboundedly, else deeply-nested trees use O(N^2) + * whitespace; so we also wrap modulo PRETTYINDENT_LIMIT. + */ + indentAmount = PRETTYINDENT_LIMIT + + (context->indentLevel - PRETTYINDENT_LIMIT) / + (PRETTYINDENT_STD / 2); + indentAmount %= PRETTYINDENT_LIMIT; + /* scale/wrap logic affects indentLevel, but not indentPlus */ + indentAmount += indentPlus; + } + appendStringInfoSpaces(buf, indentAmount); + + appendStringInfoString(buf, str); + + context->indentLevel += indentAfter; + if (context->indentLevel < 0) + context->indentLevel = 0; + } + else + appendStringInfoString(buf, str); +} + +/* + * removeStringInfoSpaces - delete trailing spaces from a buffer. + * + * Possibly this should move to stringinfo.c at some point. + */ +static void +removeStringInfoSpaces(StringInfo str) +{ + while (str->len > 0 && str->data[str->len - 1] == ' ') + str->data[--(str->len)] = '\0'; +} + +/* + * get_rule_expr_paren - deparse expr using get_rule_expr, + * embracing the string with parentheses if necessary for prettyPrint. + * + * Never embrace if prettyFlags=0, because it's done in the calling node. + * + * Any node that does *not* embrace its argument node by sql syntax (with + * parentheses, non-operator keywords like CASE/WHEN/ON, or comma etc) should + * use get_rule_expr_paren instead of get_rule_expr so parentheses can be + * added. + */ +static void +get_rule_expr_paren(Node *node, deparse_context *context, + bool showimplicit, Node *parentNode) +{ + bool need_paren; + + need_paren = PRETTY_PAREN(context) && + !isSimpleNode(node, parentNode, context->prettyFlags); + + if (need_paren) + appendStringInfoChar(context->buf, '('); + + get_rule_expr(node, context, showimplicit); + + if (need_paren) + appendStringInfoChar(context->buf, ')'); +} + +static void +get_json_behavior(JsonBehavior *behavior, deparse_context *context, + const char *on) +{ + /* + * The order of array elements must correspond to the order of + * JsonBehaviorType members. + */ + const char *behavior_names[] = + { + " NULL", + " ERROR", + " EMPTY", + " TRUE", + " FALSE", + " UNKNOWN", + " EMPTY ARRAY", + " EMPTY OBJECT", + " DEFAULT " + }; + + if ((int) behavior->btype < 0 || behavior->btype >= lengthof(behavior_names)) + elog(ERROR, "invalid json behavior type: %d", behavior->btype); + + appendStringInfoString(context->buf, behavior_names[behavior->btype]); + + if (behavior->btype == JSON_BEHAVIOR_DEFAULT) + get_rule_expr(behavior->expr, context, false); + + appendStringInfo(context->buf, " ON %s", on); +} + +/* + * get_json_expr_options + * + * Parse back common options for JSON_QUERY, JSON_VALUE, JSON_EXISTS and + * JSON_TABLE columns. 
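+ *
+ * For example, a JSON_QUERY with default options still deparses the explicit
+ * forms " WITHOUT WRAPPER" and " KEEP QUOTES", while ON EMPTY / ON ERROR
+ * behaviors are emitted (via get_json_behavior) only when they differ from
+ * the caller-supplied default behavior.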
+ */ +static void +get_json_expr_options(JsonExpr *jsexpr, deparse_context *context, + JsonBehaviorType default_behavior) +{ + if (jsexpr->op == JSON_QUERY_OP) + { + if (jsexpr->wrapper == JSW_CONDITIONAL) + appendStringInfoString(context->buf, " WITH CONDITIONAL WRAPPER"); + else if (jsexpr->wrapper == JSW_UNCONDITIONAL) + appendStringInfoString(context->buf, " WITH UNCONDITIONAL WRAPPER"); + /* The default */ + else if (jsexpr->wrapper == JSW_NONE || jsexpr->wrapper == JSW_UNSPEC) + appendStringInfoString(context->buf, " WITHOUT WRAPPER"); + + if (jsexpr->omit_quotes) + appendStringInfoString(context->buf, " OMIT QUOTES"); + /* The default */ + else + appendStringInfoString(context->buf, " KEEP QUOTES"); + } + + if (jsexpr->on_empty && jsexpr->on_empty->btype != default_behavior) + get_json_behavior(jsexpr->on_empty, context, "EMPTY"); + + if (jsexpr->on_error && jsexpr->on_error->btype != default_behavior) + get_json_behavior(jsexpr->on_error, context, "ERROR"); +} + +/* ---------- + * get_rule_expr - Parse back an expression + * + * Note: showimplicit determines whether we display any implicit cast that + * is present at the top of the expression tree. It is a passed argument, + * not a field of the context struct, because we change the value as we + * recurse down into the expression. In general we suppress implicit casts + * when the result type is known with certainty (eg, the arguments of an + * OR must be boolean). We display implicit casts for arguments of functions + * and operators, since this is needed to be certain that the same function + * or operator will be chosen when the expression is re-parsed. + * ---------- + */ +static void +get_rule_expr(Node *node, deparse_context *context, + bool showimplicit) +{ + StringInfo buf = context->buf; + + if (node == NULL) + return; + + /* Guard against excessively long or deeply-nested queries */ + CHECK_FOR_INTERRUPTS(); + check_stack_depth(); + + /* + * Each level of get_rule_expr must emit an indivisible term + * (parenthesized if necessary) to ensure result is reparsed into the same + * expression tree. The only exception is that when the input is a List, + * we emit the component items comma-separated with no surrounding + * decoration; this is convenient for most callers. + */ + switch (nodeTag(node)) + { + case T_Var: + (void) get_variable((Var *) node, 0, false, context); + break; + + case T_Const: + get_const_expr((Const *) node, context, 0); + break; + + case T_Param: + get_parameter((Param *) node, context); + break; + + case T_Aggref: + get_agg_expr((Aggref *) node, context, (Aggref *) node); + break; + + case T_GroupingFunc: + { + GroupingFunc *gexpr = (GroupingFunc *) node; + + appendStringInfoString(buf, "GROUPING("); + get_rule_expr((Node *) gexpr->args, context, true); + appendStringInfoChar(buf, ')'); + } + break; + + case T_WindowFunc: + get_windowfunc_expr((WindowFunc *) node, context); + break; + + case T_MergeSupportFunc: + appendStringInfoString(buf, "MERGE_ACTION()"); + break; + + case T_SubscriptingRef: + { + SubscriptingRef *sbsref = (SubscriptingRef *) node; + bool need_parens; + + /* + * If the argument is a CaseTestExpr, we must be inside a + * FieldStore, ie, we are assigning to an element of an array + * within a composite column. Since we already punted on + * displaying the FieldStore's target information, just punt + * here too, and display only the assignment source + * expression. 
+ */ + if (IsA(sbsref->refexpr, CaseTestExpr)) + { + Assert(sbsref->refassgnexpr); + get_rule_expr((Node *) sbsref->refassgnexpr, + context, showimplicit); + break; + } + + /* + * Parenthesize the argument unless it's a simple Var or a + * FieldSelect. (In particular, if it's another + * SubscriptingRef, we *must* parenthesize to avoid + * confusion.) + */ + need_parens = !IsA(sbsref->refexpr, Var) && + !IsA(sbsref->refexpr, FieldSelect); + if (need_parens) + appendStringInfoChar(buf, '('); + get_rule_expr((Node *) sbsref->refexpr, context, showimplicit); + if (need_parens) + appendStringInfoChar(buf, ')'); + + /* + * If there's a refassgnexpr, we want to print the node in the + * format "container[subscripts] := refassgnexpr". This is + * not legal SQL, so decompilation of INSERT or UPDATE + * statements should always use processIndirection as part of + * the statement-level syntax. We should only see this when + * EXPLAIN tries to print the targetlist of a plan resulting + * from such a statement. + */ + if (sbsref->refassgnexpr) + { + Node *refassgnexpr; + + /* + * Use processIndirection to print this node's subscripts + * as well as any additional field selections or + * subscripting in immediate descendants. It returns the + * RHS expr that is actually being "assigned". + */ + refassgnexpr = processIndirection(node, context); + appendStringInfoString(buf, " := "); + get_rule_expr(refassgnexpr, context, showimplicit); + } + else + { + /* Just an ordinary container fetch, so print subscripts */ + printSubscripts(sbsref, context); + } + } + break; + + case T_FuncExpr: + get_func_expr((FuncExpr *) node, context, showimplicit); + break; + + case T_NamedArgExpr: + { + NamedArgExpr *na = (NamedArgExpr *) node; + + appendStringInfo(buf, "%s => ", quote_identifier(na->name)); + get_rule_expr((Node *) na->arg, context, showimplicit); + } + break; + + case T_OpExpr: + get_oper_expr((OpExpr *) node, context); + break; + + case T_DistinctExpr: + { + DistinctExpr *expr = (DistinctExpr *) node; + List *args = expr->args; + Node *arg1 = (Node *) linitial(args); + Node *arg2 = (Node *) lsecond(args); + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr_paren(arg1, context, true, node); + appendStringInfoString(buf, " IS DISTINCT FROM "); + get_rule_expr_paren(arg2, context, true, node); + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + } + break; + + case T_NullIfExpr: + { + NullIfExpr *nullifexpr = (NullIfExpr *) node; + + appendStringInfoString(buf, "NULLIF("); + get_rule_expr((Node *) nullifexpr->args, context, true); + appendStringInfoChar(buf, ')'); + } + break; + + case T_ScalarArrayOpExpr: + { + ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node; + List *args = expr->args; + Node *arg1 = (Node *) linitial(args); + Node *arg2 = (Node *) lsecond(args); + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr_paren(arg1, context, true, node); + appendStringInfo(buf, " %s %s (", + generate_operator_name(expr->opno, + exprType(arg1), + get_base_element_type(exprType(arg2))), + expr->useOr ? "ANY" : "ALL"); + get_rule_expr_paren(arg2, context, true, node); + + /* + * There's inherent ambiguity in "x op ANY/ALL (y)" when y is + * a bare sub-SELECT. Since we're here, the sub-SELECT must + * be meant as a scalar sub-SELECT yielding an array value to + * be used in ScalarArrayOpExpr; but the grammar will + * preferentially interpret such a construct as an ANY/ALL + * SubLink. 
To prevent misparsing the output that way, insert + * a dummy coercion (which will be stripped by parse analysis, + * so no inefficiency is added in dump and reload). This is + * indeed most likely what the user wrote to get the construct + * accepted in the first place. + */ + if (IsA(arg2, SubLink) && + ((SubLink *) arg2)->subLinkType == EXPR_SUBLINK) + appendStringInfo(buf, "::%s", + format_type_with_typemod(exprType(arg2), + exprTypmod(arg2))); + appendStringInfoChar(buf, ')'); + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + } + break; + + case T_BoolExpr: + { + BoolExpr *expr = (BoolExpr *) node; + Node *first_arg = linitial(expr->args); + ListCell *arg; + + switch (expr->boolop) + { + case AND_EXPR: + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr_paren(first_arg, context, + false, node); + for_each_from(arg, expr->args, 1) + { + appendStringInfoString(buf, " AND "); + get_rule_expr_paren((Node *) lfirst(arg), context, + false, node); + } + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + break; + + case OR_EXPR: + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr_paren(first_arg, context, + false, node); + for_each_from(arg, expr->args, 1) + { + appendStringInfoString(buf, " OR "); + get_rule_expr_paren((Node *) lfirst(arg), context, + false, node); + } + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + break; + + case NOT_EXPR: + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + appendStringInfoString(buf, "NOT "); + get_rule_expr_paren(first_arg, context, + false, node); + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + break; + + default: + elog(ERROR, "unrecognized boolop: %d", + (int) expr->boolop); + } + } + break; + + case T_SubLink: + get_sublink_expr((SubLink *) node, context); + break; + + case T_SubPlan: + { + SubPlan *subplan = (SubPlan *) node; + + /* + * We cannot see an already-planned subplan in rule deparsing, + * only while EXPLAINing a query plan. We don't try to + * reconstruct the original SQL, just reference the subplan + * that appears elsewhere in EXPLAIN's result. It does seem + * useful to show the subLinkType and testexpr (if any), and + * we also note whether the subplan will be hashed. 
+ */ + switch (subplan->subLinkType) + { + case EXISTS_SUBLINK: + appendStringInfoString(buf, "EXISTS("); + Assert(subplan->testexpr == NULL); + break; + case ALL_SUBLINK: + appendStringInfoString(buf, "(ALL "); + Assert(subplan->testexpr != NULL); + break; + case ANY_SUBLINK: + appendStringInfoString(buf, "(ANY "); + Assert(subplan->testexpr != NULL); + break; + case ROWCOMPARE_SUBLINK: + /* Parenthesizing the testexpr seems sufficient */ + appendStringInfoChar(buf, '('); + Assert(subplan->testexpr != NULL); + break; + case EXPR_SUBLINK: + /* No need to decorate these subplan references */ + appendStringInfoChar(buf, '('); + Assert(subplan->testexpr == NULL); + break; + case MULTIEXPR_SUBLINK: + /* MULTIEXPR isn't executed in the normal way */ + appendStringInfoString(buf, "(rescan "); + Assert(subplan->testexpr == NULL); + break; + case ARRAY_SUBLINK: + appendStringInfoString(buf, "ARRAY("); + Assert(subplan->testexpr == NULL); + break; + case CTE_SUBLINK: + /* This case is unreachable within expressions */ + appendStringInfoString(buf, "CTE("); + Assert(subplan->testexpr == NULL); + break; + } + + if (subplan->testexpr != NULL) + { + deparse_namespace *dpns; + + /* + * Push SubPlan into ancestors list while deparsing + * testexpr, so that we can handle PARAM_EXEC references + * to the SubPlan's paramIds. (This makes it look like + * the SubPlan is an "ancestor" of the current plan node, + * which is a little weird, but it does no harm.) In this + * path, we don't need to mention the SubPlan explicitly, + * because the referencing Params will show its existence. + */ + dpns = (deparse_namespace *) linitial(context->namespaces); + dpns->ancestors = lcons(subplan, dpns->ancestors); + + get_rule_expr(subplan->testexpr, context, showimplicit); + appendStringInfoChar(buf, ')'); + + dpns->ancestors = list_delete_first(dpns->ancestors); + } + else + { + /* No referencing Params, so show the SubPlan's name */ + if (subplan->useHashTable) + appendStringInfo(buf, "hashed %s)", subplan->plan_name); + else + appendStringInfo(buf, "%s)", subplan->plan_name); + } + } + break; + + case T_AlternativeSubPlan: + { + AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; + ListCell *lc; + + /* + * This case cannot be reached in normal usage, since no + * AlternativeSubPlan can appear either in parsetrees or + * finished plan trees. We keep it just in case somebody + * wants to use this code to print planner data structures. + */ + appendStringInfoString(buf, "(alternatives: "); + foreach(lc, asplan->subplans) + { + SubPlan *splan = lfirst_node(SubPlan, lc); + + if (splan->useHashTable) + appendStringInfo(buf, "hashed %s", splan->plan_name); + else + appendStringInfoString(buf, splan->plan_name); + if (lnext(asplan->subplans, lc)) + appendStringInfoString(buf, " or "); + } + appendStringInfoChar(buf, ')'); + } + break; + + case T_FieldSelect: + { + FieldSelect *fselect = (FieldSelect *) node; + Node *arg = (Node *) fselect->arg; + int fno = fselect->fieldnum; + const char *fieldname; + bool need_parens; + + /* + * Parenthesize the argument unless it's an SubscriptingRef or + * another FieldSelect. Note in particular that it would be + * WRONG to not parenthesize a Var argument; simplicity is not + * the issue here, having the right number of names is. 
+ */ + need_parens = !IsA(arg, SubscriptingRef) && + !IsA(arg, FieldSelect); + if (need_parens) + appendStringInfoChar(buf, '('); + get_rule_expr(arg, context, true); + if (need_parens) + appendStringInfoChar(buf, ')'); + + /* + * Get and print the field name. + */ + fieldname = get_name_for_var_field((Var *) arg, fno, + 0, context); + appendStringInfo(buf, ".%s", quote_identifier(fieldname)); + } + break; + + case T_FieldStore: + { + FieldStore *fstore = (FieldStore *) node; + bool need_parens; + + /* + * There is no good way to represent a FieldStore as real SQL, + * so decompilation of INSERT or UPDATE statements should + * always use processIndirection as part of the + * statement-level syntax. We should only get here when + * EXPLAIN tries to print the targetlist of a plan resulting + * from such a statement. The plan case is even harder than + * ordinary rules would be, because the planner tries to + * collapse multiple assignments to the same field or subfield + * into one FieldStore; so we can see a list of target fields + * not just one, and the arguments could be FieldStores + * themselves. We don't bother to try to print the target + * field names; we just print the source arguments, with a + * ROW() around them if there's more than one. This isn't + * terribly complete, but it's probably good enough for + * EXPLAIN's purposes; especially since anything more would be + * either hopelessly confusing or an even poorer + * representation of what the plan is actually doing. + */ + need_parens = (list_length(fstore->newvals) != 1); + if (need_parens) + appendStringInfoString(buf, "ROW("); + get_rule_expr((Node *) fstore->newvals, context, showimplicit); + if (need_parens) + appendStringInfoChar(buf, ')'); + } + break; + + case T_RelabelType: + { + RelabelType *relabel = (RelabelType *) node; + Node *arg = (Node *) relabel->arg; + + if (relabel->relabelformat == COERCE_IMPLICIT_CAST && + !showimplicit) + { + /* don't show the implicit cast */ + get_rule_expr_paren(arg, context, false, node); + } + else + { + get_coercion_expr(arg, context, + relabel->resulttype, + relabel->resulttypmod, + node); + } + } + break; + + case T_CoerceViaIO: + { + CoerceViaIO *iocoerce = (CoerceViaIO *) node; + Node *arg = (Node *) iocoerce->arg; + + if (iocoerce->coerceformat == COERCE_IMPLICIT_CAST && + !showimplicit) + { + /* don't show the implicit cast */ + get_rule_expr_paren(arg, context, false, node); + } + else + { + get_coercion_expr(arg, context, + iocoerce->resulttype, + -1, + node); + } + } + break; + + case T_ArrayCoerceExpr: + { + ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; + Node *arg = (Node *) acoerce->arg; + + if (acoerce->coerceformat == COERCE_IMPLICIT_CAST && + !showimplicit) + { + /* don't show the implicit cast */ + get_rule_expr_paren(arg, context, false, node); + } + else + { + get_coercion_expr(arg, context, + acoerce->resulttype, + acoerce->resulttypmod, + node); + } + } + break; + + case T_ConvertRowtypeExpr: + { + ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node; + Node *arg = (Node *) convert->arg; + + if (convert->convertformat == COERCE_IMPLICIT_CAST && + !showimplicit) + { + /* don't show the implicit cast */ + get_rule_expr_paren(arg, context, false, node); + } + else + { + get_coercion_expr(arg, context, + convert->resulttype, -1, + node); + } + } + break; + + case T_CollateExpr: + { + CollateExpr *collate = (CollateExpr *) node; + Node *arg = (Node *) collate->arg; + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + 
get_rule_expr_paren(arg, context, showimplicit, node); + appendStringInfo(buf, " COLLATE %s", + generate_collation_name(collate->collOid)); + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + } + break; + + case T_CaseExpr: + { + CaseExpr *caseexpr = (CaseExpr *) node; + ListCell *temp; + + appendContextKeyword(context, "CASE", + 0, PRETTYINDENT_VAR, 0); + if (caseexpr->arg) + { + appendStringInfoChar(buf, ' '); + get_rule_expr((Node *) caseexpr->arg, context, true); + } + foreach(temp, caseexpr->args) + { + CaseWhen *when = (CaseWhen *) lfirst(temp); + Node *w = (Node *) when->expr; + + if (caseexpr->arg) + { + /* + * The parser should have produced WHEN clauses of the + * form "CaseTestExpr = RHS", possibly with an + * implicit coercion inserted above the CaseTestExpr. + * For accurate decompilation of rules it's essential + * that we show just the RHS. However in an + * expression that's been through the optimizer, the + * WHEN clause could be almost anything (since the + * equality operator could have been expanded into an + * inline function). If we don't recognize the form + * of the WHEN clause, just punt and display it as-is. + */ + if (IsA(w, OpExpr)) + { + List *args = ((OpExpr *) w)->args; + + if (list_length(args) == 2 && + IsA(strip_implicit_coercions(linitial(args)), + CaseTestExpr)) + w = (Node *) lsecond(args); + } + } + + if (!PRETTY_INDENT(context)) + appendStringInfoChar(buf, ' '); + appendContextKeyword(context, "WHEN ", + 0, 0, 0); + get_rule_expr(w, context, false); + appendStringInfoString(buf, " THEN "); + get_rule_expr((Node *) when->result, context, true); + } + if (!PRETTY_INDENT(context)) + appendStringInfoChar(buf, ' '); + appendContextKeyword(context, "ELSE ", + 0, 0, 0); + get_rule_expr((Node *) caseexpr->defresult, context, true); + if (!PRETTY_INDENT(context)) + appendStringInfoChar(buf, ' '); + appendContextKeyword(context, "END", + -PRETTYINDENT_VAR, 0, 0); + } + break; + + case T_CaseTestExpr: + { + /* + * Normally we should never get here, since for expressions + * that can contain this node type we attempt to avoid + * recursing to it. But in an optimized expression we might + * be unable to avoid that (see comments for CaseExpr). If we + * do see one, print it as CASE_TEST_EXPR. + */ + appendStringInfoString(buf, "CASE_TEST_EXPR"); + } + break; + + case T_ArrayExpr: + { + ArrayExpr *arrayexpr = (ArrayExpr *) node; + + appendStringInfoString(buf, "ARRAY["); + get_rule_expr((Node *) arrayexpr->elements, context, true); + appendStringInfoChar(buf, ']'); + + /* + * If the array isn't empty, we assume its elements are + * coerced to the desired type. If it's empty, though, we + * need an explicit coercion to the array type. + */ + if (arrayexpr->elements == NIL) + appendStringInfo(buf, "::%s", + format_type_with_typemod(arrayexpr->array_typeid, -1)); + } + break; + + case T_RowExpr: + { + RowExpr *rowexpr = (RowExpr *) node; + TupleDesc tupdesc = NULL; + ListCell *arg; + int i; + char *sep; + + /* + * If it's a named type and not RECORD, we may have to skip + * dropped columns and/or claim there are NULLs for added + * columns. + */ + if (rowexpr->row_typeid != RECORDOID) + { + tupdesc = lookup_rowtype_tupdesc(rowexpr->row_typeid, -1); + Assert(list_length(rowexpr->args) <= tupdesc->natts); + } + + /* + * SQL99 allows "ROW" to be omitted when there is more than + * one column, but for simplicity we always print it. 
+ */ + appendStringInfoString(buf, "ROW("); + sep = ""; + i = 0; + foreach(arg, rowexpr->args) + { + Node *e = (Node *) lfirst(arg); + + if (tupdesc == NULL || + !TupleDescAttr(tupdesc, i)->attisdropped) + { + appendStringInfoString(buf, sep); + /* Whole-row Vars need special treatment here */ + get_rule_expr_toplevel(e, context, true); + sep = ", "; + } + i++; + } + if (tupdesc != NULL) + { + while (i < tupdesc->natts) + { + if (!TupleDescAttr(tupdesc, i)->attisdropped) + { + appendStringInfoString(buf, sep); + appendStringInfoString(buf, "NULL"); + sep = ", "; + } + i++; + } + + ReleaseTupleDesc(tupdesc); + } + appendStringInfoChar(buf, ')'); + if (rowexpr->row_format == COERCE_EXPLICIT_CAST) + appendStringInfo(buf, "::%s", + format_type_with_typemod(rowexpr->row_typeid, -1)); + } + break; + + case T_RowCompareExpr: + { + RowCompareExpr *rcexpr = (RowCompareExpr *) node; + + /* + * SQL99 allows "ROW" to be omitted when there is more than + * one column, but for simplicity we always print it. Within + * a ROW expression, whole-row Vars need special treatment, so + * use get_rule_list_toplevel. + */ + appendStringInfoString(buf, "(ROW("); + get_rule_list_toplevel(rcexpr->largs, context, true); + + /* + * We assume that the name of the first-column operator will + * do for all the rest too. This is definitely open to + * failure, eg if some but not all operators were renamed + * since the construct was parsed, but there seems no way to + * be perfect. + */ + appendStringInfo(buf, ") %s ROW(", + generate_operator_name(linitial_oid(rcexpr->opnos), + exprType(linitial(rcexpr->largs)), + exprType(linitial(rcexpr->rargs)))); + get_rule_list_toplevel(rcexpr->rargs, context, true); + appendStringInfoString(buf, "))"); + } + break; + + case T_CoalesceExpr: + { + CoalesceExpr *coalesceexpr = (CoalesceExpr *) node; + + appendStringInfoString(buf, "COALESCE("); + get_rule_expr((Node *) coalesceexpr->args, context, true); + appendStringInfoChar(buf, ')'); + } + break; + + case T_MinMaxExpr: + { + MinMaxExpr *minmaxexpr = (MinMaxExpr *) node; + + switch (minmaxexpr->op) + { + case IS_GREATEST: + appendStringInfoString(buf, "GREATEST("); + break; + case IS_LEAST: + appendStringInfoString(buf, "LEAST("); + break; + } + get_rule_expr((Node *) minmaxexpr->args, context, true); + appendStringInfoChar(buf, ')'); + } + break; + + case T_SQLValueFunction: + { + SQLValueFunction *svf = (SQLValueFunction *) node; + + /* + * Note: this code knows that typmod for time, timestamp, and + * timestamptz just prints as integer. 
+ */ + switch (svf->op) + { + case SVFOP_CURRENT_DATE: + appendStringInfoString(buf, "CURRENT_DATE"); + break; + case SVFOP_CURRENT_TIME: + appendStringInfoString(buf, "CURRENT_TIME"); + break; + case SVFOP_CURRENT_TIME_N: + appendStringInfo(buf, "CURRENT_TIME(%d)", svf->typmod); + break; + case SVFOP_CURRENT_TIMESTAMP: + appendStringInfoString(buf, "CURRENT_TIMESTAMP"); + break; + case SVFOP_CURRENT_TIMESTAMP_N: + appendStringInfo(buf, "CURRENT_TIMESTAMP(%d)", + svf->typmod); + break; + case SVFOP_LOCALTIME: + appendStringInfoString(buf, "LOCALTIME"); + break; + case SVFOP_LOCALTIME_N: + appendStringInfo(buf, "LOCALTIME(%d)", svf->typmod); + break; + case SVFOP_LOCALTIMESTAMP: + appendStringInfoString(buf, "LOCALTIMESTAMP"); + break; + case SVFOP_LOCALTIMESTAMP_N: + appendStringInfo(buf, "LOCALTIMESTAMP(%d)", + svf->typmod); + break; + case SVFOP_CURRENT_ROLE: + appendStringInfoString(buf, "CURRENT_ROLE"); + break; + case SVFOP_CURRENT_USER: + appendStringInfoString(buf, "CURRENT_USER"); + break; + case SVFOP_USER: + appendStringInfoString(buf, "USER"); + break; + case SVFOP_SESSION_USER: + appendStringInfoString(buf, "SESSION_USER"); + break; + case SVFOP_CURRENT_CATALOG: + appendStringInfoString(buf, "CURRENT_CATALOG"); + break; + case SVFOP_CURRENT_SCHEMA: + appendStringInfoString(buf, "CURRENT_SCHEMA"); + break; + } + } + break; + + case T_XmlExpr: + { + XmlExpr *xexpr = (XmlExpr *) node; + bool needcomma = false; + ListCell *arg; + ListCell *narg; + Const *con; + + switch (xexpr->op) + { + case IS_XMLCONCAT: + appendStringInfoString(buf, "XMLCONCAT("); + break; + case IS_XMLELEMENT: + appendStringInfoString(buf, "XMLELEMENT("); + break; + case IS_XMLFOREST: + appendStringInfoString(buf, "XMLFOREST("); + break; + case IS_XMLPARSE: + appendStringInfoString(buf, "XMLPARSE("); + break; + case IS_XMLPI: + appendStringInfoString(buf, "XMLPI("); + break; + case IS_XMLROOT: + appendStringInfoString(buf, "XMLROOT("); + break; + case IS_XMLSERIALIZE: + appendStringInfoString(buf, "XMLSERIALIZE("); + break; + case IS_DOCUMENT: + break; + } + if (xexpr->op == IS_XMLPARSE || xexpr->op == IS_XMLSERIALIZE) + { + if (xexpr->xmloption == XMLOPTION_DOCUMENT) + appendStringInfoString(buf, "DOCUMENT "); + else + appendStringInfoString(buf, "CONTENT "); + } + if (xexpr->name) + { + appendStringInfo(buf, "NAME %s", + quote_identifier(map_xml_name_to_sql_identifier(xexpr->name))); + needcomma = true; + } + if (xexpr->named_args) + { + if (xexpr->op != IS_XMLFOREST) + { + if (needcomma) + appendStringInfoString(buf, ", "); + appendStringInfoString(buf, "XMLATTRIBUTES("); + needcomma = false; + } + forboth(arg, xexpr->named_args, narg, xexpr->arg_names) + { + Node *e = (Node *) lfirst(arg); + char *argname = strVal(lfirst(narg)); + + if (needcomma) + appendStringInfoString(buf, ", "); + get_rule_expr((Node *) e, context, true); + appendStringInfo(buf, " AS %s", + quote_identifier(map_xml_name_to_sql_identifier(argname))); + needcomma = true; + } + if (xexpr->op != IS_XMLFOREST) + appendStringInfoChar(buf, ')'); + } + if (xexpr->args) + { + if (needcomma) + appendStringInfoString(buf, ", "); + switch (xexpr->op) + { + case IS_XMLCONCAT: + case IS_XMLELEMENT: + case IS_XMLFOREST: + case IS_XMLPI: + case IS_XMLSERIALIZE: + /* no extra decoration needed */ + get_rule_expr((Node *) xexpr->args, context, true); + break; + case IS_XMLPARSE: + Assert(list_length(xexpr->args) == 2); + + get_rule_expr((Node *) linitial(xexpr->args), + context, true); + + con = lsecond_node(Const, xexpr->args); + 
Assert(!con->constisnull); + if (DatumGetBool(con->constvalue)) + appendStringInfoString(buf, + " PRESERVE WHITESPACE"); + else + appendStringInfoString(buf, + " STRIP WHITESPACE"); + break; + case IS_XMLROOT: + Assert(list_length(xexpr->args) == 3); + + get_rule_expr((Node *) linitial(xexpr->args), + context, true); + + appendStringInfoString(buf, ", VERSION "); + con = (Const *) lsecond(xexpr->args); + if (IsA(con, Const) && + con->constisnull) + appendStringInfoString(buf, "NO VALUE"); + else + get_rule_expr((Node *) con, context, false); + + con = lthird_node(Const, xexpr->args); + if (con->constisnull) + /* suppress STANDALONE NO VALUE */ ; + else + { + switch (DatumGetInt32(con->constvalue)) + { + case XML_STANDALONE_YES: + appendStringInfoString(buf, + ", STANDALONE YES"); + break; + case XML_STANDALONE_NO: + appendStringInfoString(buf, + ", STANDALONE NO"); + break; + case XML_STANDALONE_NO_VALUE: + appendStringInfoString(buf, + ", STANDALONE NO VALUE"); + break; + default: + break; + } + } + break; + case IS_DOCUMENT: + get_rule_expr_paren((Node *) xexpr->args, context, false, node); + break; + } + } + if (xexpr->op == IS_XMLSERIALIZE) + appendStringInfo(buf, " AS %s", + format_type_with_typemod(xexpr->type, + xexpr->typmod)); + if (xexpr->op == IS_DOCUMENT) + appendStringInfoString(buf, " IS DOCUMENT"); + else + appendStringInfoChar(buf, ')'); + } + break; + + case T_NullTest: + { + NullTest *ntest = (NullTest *) node; + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr_paren((Node *) ntest->arg, context, true, node); + + /* + * For scalar inputs, we prefer to print as IS [NOT] NULL, + * which is shorter and traditional. If it's a rowtype input + * but we're applying a scalar test, must print IS [NOT] + * DISTINCT FROM NULL to be semantically correct. 
+ */ + if (ntest->argisrow || + !type_is_rowtype(exprType((Node *) ntest->arg))) + { + switch (ntest->nulltesttype) + { + case IS_NULL: + appendStringInfoString(buf, " IS NULL"); + break; + case IS_NOT_NULL: + appendStringInfoString(buf, " IS NOT NULL"); + break; + default: + elog(ERROR, "unrecognized nulltesttype: %d", + (int) ntest->nulltesttype); + } + } + else + { + switch (ntest->nulltesttype) + { + case IS_NULL: + appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL"); + break; + case IS_NOT_NULL: + appendStringInfoString(buf, " IS DISTINCT FROM NULL"); + break; + default: + elog(ERROR, "unrecognized nulltesttype: %d", + (int) ntest->nulltesttype); + } + } + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + } + break; + + case T_BooleanTest: + { + BooleanTest *btest = (BooleanTest *) node; + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr_paren((Node *) btest->arg, context, false, node); + switch (btest->booltesttype) + { + case IS_TRUE: + appendStringInfoString(buf, " IS TRUE"); + break; + case IS_NOT_TRUE: + appendStringInfoString(buf, " IS NOT TRUE"); + break; + case IS_FALSE: + appendStringInfoString(buf, " IS FALSE"); + break; + case IS_NOT_FALSE: + appendStringInfoString(buf, " IS NOT FALSE"); + break; + case IS_UNKNOWN: + appendStringInfoString(buf, " IS UNKNOWN"); + break; + case IS_NOT_UNKNOWN: + appendStringInfoString(buf, " IS NOT UNKNOWN"); + break; + default: + elog(ERROR, "unrecognized booltesttype: %d", + (int) btest->booltesttype); + } + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + } + break; + + case T_CoerceToDomain: + { + CoerceToDomain *ctest = (CoerceToDomain *) node; + Node *arg = (Node *) ctest->arg; + + if (ctest->coercionformat == COERCE_IMPLICIT_CAST && + !showimplicit) + { + /* don't show the implicit cast */ + get_rule_expr(arg, context, false); + } + else + { + get_coercion_expr(arg, context, + ctest->resulttype, + ctest->resulttypmod, + node); + } + } + break; + + case T_CoerceToDomainValue: + appendStringInfoString(buf, "VALUE"); + break; + + case T_SetToDefault: + appendStringInfoString(buf, "DEFAULT"); + break; + + case T_CurrentOfExpr: + { + CurrentOfExpr *cexpr = (CurrentOfExpr *) node; + + if (cexpr->cursor_name) + appendStringInfo(buf, "CURRENT OF %s", + quote_identifier(cexpr->cursor_name)); + else + appendStringInfo(buf, "CURRENT OF $%d", + cexpr->cursor_param); + } + break; + + case T_NextValueExpr: + { + NextValueExpr *nvexpr = (NextValueExpr *) node; + + /* + * This isn't exactly nextval(), but that seems close enough + * for EXPLAIN's purposes. + */ + appendStringInfoString(buf, "nextval("); + simple_quote_literal(buf, + generate_relation_name(nvexpr->seqid, + NIL)); + appendStringInfoChar(buf, ')'); + } + break; + + case T_InferenceElem: + { + InferenceElem *iexpr = (InferenceElem *) node; + bool save_varprefix; + bool need_parens; + + /* + * InferenceElem can only refer to target relation, so a + * prefix is not useful, and indeed would cause parse errors. + */ + save_varprefix = context->varprefix; + context->varprefix = false; + + /* + * Parenthesize the element unless it's a simple Var or a bare + * function call. Follows pg_get_indexdef_worker(). 
+ */ + need_parens = !IsA(iexpr->expr, Var); + if (IsA(iexpr->expr, FuncExpr) && + ((FuncExpr *) iexpr->expr)->funcformat == + COERCE_EXPLICIT_CALL) + need_parens = false; + + if (need_parens) + appendStringInfoChar(buf, '('); + get_rule_expr((Node *) iexpr->expr, + context, false); + if (need_parens) + appendStringInfoChar(buf, ')'); + + context->varprefix = save_varprefix; + + if (iexpr->infercollid) + appendStringInfo(buf, " COLLATE %s", + generate_collation_name(iexpr->infercollid)); + + /* Add the operator class name, if not default */ + if (iexpr->inferopclass) + { + Oid inferopclass = iexpr->inferopclass; + Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass); + + get_opclass_name(inferopclass, inferopcinputtype, buf); + } + } + break; + + case T_PartitionBoundSpec: + { + PartitionBoundSpec *spec = (PartitionBoundSpec *) node; + ListCell *cell; + char *sep; + + if (spec->is_default) + { + appendStringInfoString(buf, "DEFAULT"); + break; + } + + switch (spec->strategy) + { + case PARTITION_STRATEGY_HASH: + Assert(spec->modulus > 0 && spec->remainder >= 0); + Assert(spec->modulus > spec->remainder); + + appendStringInfoString(buf, "FOR VALUES"); + appendStringInfo(buf, " WITH (modulus %d, remainder %d)", + spec->modulus, spec->remainder); + break; + + case PARTITION_STRATEGY_LIST: + Assert(spec->listdatums != NIL); + + appendStringInfoString(buf, "FOR VALUES IN ("); + sep = ""; + foreach(cell, spec->listdatums) + { + Const *val = lfirst_node(Const, cell); + + appendStringInfoString(buf, sep); + get_const_expr(val, context, -1); + sep = ", "; + } + + appendStringInfoChar(buf, ')'); + break; + + case PARTITION_STRATEGY_RANGE: + Assert(spec->lowerdatums != NIL && + spec->upperdatums != NIL && + list_length(spec->lowerdatums) == + list_length(spec->upperdatums)); + + appendStringInfo(buf, "FOR VALUES FROM %s TO %s", + get_range_partbound_string(spec->lowerdatums), + get_range_partbound_string(spec->upperdatums)); + break; + + default: + elog(ERROR, "unrecognized partition strategy: %d", + (int) spec->strategy); + break; + } + } + break; + + case T_JsonValueExpr: + { + JsonValueExpr *jve = (JsonValueExpr *) node; + + get_rule_expr((Node *) jve->raw_expr, context, false); + get_json_format(jve->format, context->buf); + } + break; + + case T_JsonConstructorExpr: + get_json_constructor((JsonConstructorExpr *) node, context, false); + break; + + case T_JsonIsPredicate: + { + JsonIsPredicate *pred = (JsonIsPredicate *) node; + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(context->buf, '('); + + get_rule_expr_paren(pred->expr, context, true, node); + + appendStringInfoString(context->buf, " IS JSON"); + + /* TODO: handle FORMAT clause */ + + switch (pred->item_type) + { + case JS_TYPE_SCALAR: + appendStringInfoString(context->buf, " SCALAR"); + break; + case JS_TYPE_ARRAY: + appendStringInfoString(context->buf, " ARRAY"); + break; + case JS_TYPE_OBJECT: + appendStringInfoString(context->buf, " OBJECT"); + break; + default: + break; + } + + if (pred->unique_keys) + appendStringInfoString(context->buf, " WITH UNIQUE KEYS"); + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(context->buf, ')'); + } + break; + + case T_JsonExpr: + { + JsonExpr *jexpr = (JsonExpr *) node; + + switch (jexpr->op) + { + case JSON_EXISTS_OP: + appendStringInfoString(buf, "JSON_EXISTS("); + break; + case JSON_QUERY_OP: + appendStringInfoString(buf, "JSON_QUERY("); + break; + case JSON_VALUE_OP: + appendStringInfoString(buf, "JSON_VALUE("); + break; + default: + elog(ERROR, "unrecognized JsonExpr 
op: %d", + (int) jexpr->op); + } + + get_rule_expr(jexpr->formatted_expr, context, showimplicit); + + appendStringInfoString(buf, ", "); + + get_json_path_spec(jexpr->path_spec, context, showimplicit); + + if (jexpr->passing_values) + { + ListCell *lc1, + *lc2; + bool needcomma = false; + + appendStringInfoString(buf, " PASSING "); + + forboth(lc1, jexpr->passing_names, + lc2, jexpr->passing_values) + { + if (needcomma) + appendStringInfoString(buf, ", "); + needcomma = true; + + get_rule_expr((Node *) lfirst(lc2), context, showimplicit); + appendStringInfo(buf, " AS %s", + ((String *) lfirst_node(String, lc1))->sval); + } + } + + if (jexpr->op != JSON_EXISTS_OP || + jexpr->returning->typid != BOOLOID) + get_json_returning(jexpr->returning, context->buf, + jexpr->op == JSON_QUERY_OP); + + get_json_expr_options(jexpr, context, + jexpr->op != JSON_EXISTS_OP ? + JSON_BEHAVIOR_NULL : + JSON_BEHAVIOR_FALSE); + + appendStringInfoChar(buf, ')'); + } + break; + + case T_List: + { + char *sep; + ListCell *l; + + sep = ""; + foreach(l, (List *) node) + { + appendStringInfoString(buf, sep); + get_rule_expr((Node *) lfirst(l), context, showimplicit); + sep = ", "; + } + } + break; + + case T_TableFunc: + get_tablefunc((TableFunc *) node, context, showimplicit); + break; + + case T_CallStmt: + get_proc_expr((CallStmt *) node, context, showimplicit); + break; + + default: + elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); + break; + } +} + +/* + * get_rule_expr_toplevel - Parse back a toplevel expression + * + * Same as get_rule_expr(), except that if the expr is just a Var, we pass + * istoplevel = true not false to get_variable(). This causes whole-row Vars + * to get printed with decoration that will prevent expansion of "*". + * We need to use this in contexts such as ROW() and VALUES(), where the + * parser would expand "foo.*" appearing at top level. (In principle we'd + * use this in get_target_list() too, but that has additional worries about + * whether to print AS, so it needs to invoke get_variable() directly anyway.) + */ +static void +get_rule_expr_toplevel(Node *node, deparse_context *context, + bool showimplicit) +{ + if (node && IsA(node, Var)) + (void) get_variable((Var *) node, 0, true, context); + else + get_rule_expr(node, context, showimplicit); +} + +/* + * get_rule_list_toplevel - Parse back a list of toplevel expressions + * + * Apply get_rule_expr_toplevel() to each element of a List. + * + * This adds commas between the expressions, but caller is responsible + * for printing surrounding decoration. + */ +static void +get_rule_list_toplevel(List *lst, deparse_context *context, + bool showimplicit) +{ + const char *sep; + ListCell *lc; + + sep = ""; + foreach(lc, lst) + { + Node *e = (Node *) lfirst(lc); + + appendStringInfoString(context->buf, sep); + get_rule_expr_toplevel(e, context, showimplicit); + sep = ", "; + } +} + +/* + * get_rule_expr_funccall - Parse back a function-call expression + * + * Same as get_rule_expr(), except that we guarantee that the output will + * look like a function call, or like one of the things the grammar treats as + * equivalent to a function call (see the func_expr_windowless production). + * This is needed in places where the grammar uses func_expr_windowless and + * you can't substitute a parenthesized a_expr. If what we have isn't going + * to look like a function call, wrap it in a dummy CAST() expression, which + * will satisfy the grammar --- and, indeed, is likely what the user wrote to + * produce such a thing. 
+ */ +static void +get_rule_expr_funccall(Node *node, deparse_context *context, + bool showimplicit) +{ + if (looks_like_function(node)) + get_rule_expr(node, context, showimplicit); + else + { + StringInfo buf = context->buf; + + appendStringInfoString(buf, "CAST("); + /* no point in showing any top-level implicit cast */ + get_rule_expr(node, context, false); + appendStringInfo(buf, " AS %s)", + format_type_with_typemod(exprType(node), + exprTypmod(node))); + } +} + +/* + * Helper function to identify node types that satisfy func_expr_windowless. + * If in doubt, "false" is always a safe answer. + */ +static bool +looks_like_function(Node *node) +{ + if (node == NULL) + return false; /* probably shouldn't happen */ + switch (nodeTag(node)) + { + case T_FuncExpr: + /* OK, unless it's going to deparse as a cast */ + return (((FuncExpr *) node)->funcformat == COERCE_EXPLICIT_CALL || + ((FuncExpr *) node)->funcformat == COERCE_SQL_SYNTAX); + case T_NullIfExpr: + case T_CoalesceExpr: + case T_MinMaxExpr: + case T_SQLValueFunction: + case T_XmlExpr: + case T_JsonExpr: + /* these are all accepted by func_expr_common_subexpr */ + return true; + default: + break; + } + return false; +} + +/* + * get_oper_expr - Parse back an OpExpr node + */ +static void +get_oper_expr(OpExpr *expr, deparse_context *context) +{ + StringInfo buf = context->buf; + Oid opno = expr->opno; + List *args = expr->args; + + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + if (list_length(args) == 2) + { + /* binary operator */ + Node *arg1 = (Node *) linitial(args); + Node *arg2 = (Node *) lsecond(args); + + get_rule_expr_paren(arg1, context, true, (Node *) expr); + appendStringInfo(buf, " %s ", + generate_operator_name(opno, + exprType(arg1), + exprType(arg2))); + get_rule_expr_paren(arg2, context, true, (Node *) expr); + } + else + { + /* prefix operator */ + Node *arg = (Node *) linitial(args); + + appendStringInfo(buf, "%s ", + generate_operator_name(opno, + InvalidOid, + exprType(arg))); + get_rule_expr_paren(arg, context, true, (Node *) expr); + } + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); +} + +/* + * get_func_expr - Parse back a FuncExpr node + */ +static void +get_func_expr(FuncExpr *expr, deparse_context *context, + bool showimplicit) +{ + StringInfo buf = context->buf; + Oid funcoid = expr->funcid; + Oid argtypes[FUNC_MAX_ARGS]; + int nargs; + List *argnames; + bool use_variadic; + ListCell *l; + + /* + * If the function call came from an implicit coercion, then just show the + * first argument --- unless caller wants to see implicit coercions. + */ + if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit) + { + get_rule_expr_paren((Node *) linitial(expr->args), context, + false, (Node *) expr); + return; + } + + /* + * If the function call came from a cast, then show the first argument + * plus an explicit cast operation. + */ + if (expr->funcformat == COERCE_EXPLICIT_CAST || + expr->funcformat == COERCE_IMPLICIT_CAST) + { + Node *arg = linitial(expr->args); + Oid rettype = expr->funcresulttype; + int32 coercedTypmod; + + /* Get the typmod if this is a length-coercion function */ + (void) exprIsLengthCoercion((Node *) expr, &coercedTypmod); + + get_coercion_expr(arg, context, + rettype, coercedTypmod, + (Node *) expr); + + return; + } + + /* + * If the function was called using one of the SQL spec's random special + * syntaxes, try to reproduce that. If we don't recognize the function, + * fall through. 
+ */ + if (expr->funcformat == COERCE_SQL_SYNTAX) + { + if (get_func_sql_syntax(expr, context)) + return; + } + + + /* + * Normal function: display as proname(args). First we need to extract + * the argument datatypes. + */ + if (list_length(expr->args) > FUNC_MAX_ARGS) + ereport(ERROR, + (errcode(ERRCODE_TOO_MANY_ARGUMENTS), + errmsg("too many arguments"))); + nargs = 0; + argnames = NIL; + foreach(l, expr->args) + { + Node *arg = (Node *) lfirst(l); + + if (IsA(arg, NamedArgExpr)) + argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); + argtypes[nargs] = exprType(arg); + nargs++; + } + + appendStringInfo(buf, "%s(", + generate_function_name(funcoid, nargs, + argnames, argtypes, + expr->funcvariadic, + &use_variadic, + context->special_exprkind)); + nargs = 0; + foreach(l, expr->args) + { + if (nargs++ > 0) + appendStringInfoString(buf, ", "); + if (use_variadic && lnext(expr->args, l) == NULL) + appendStringInfoString(buf, "VARIADIC "); + get_rule_expr((Node *) lfirst(l), context, true); + } + + appendStringInfoChar(buf, ')'); +} + +/* + * get_proc_expr - Parse back a CallStmt node + */ +static void +get_proc_expr(CallStmt *stmt, deparse_context *context, + bool showimplicit) +{ + StringInfo buf = context->buf; + Oid functionOid = stmt->funcexpr->funcid; + bool use_variadic; + Oid *argumentTypes; + List *finalArgumentList = NIL; + ListCell *argumentCell; + List *namedArgList = NIL; + int numberOfArgs = -1; + + if (!get_merged_argument_list(stmt, &namedArgList, &argumentTypes, + &finalArgumentList, &numberOfArgs)) + { + /* Nothing merged i.e. no OUT arguments */ + get_func_expr((FuncExpr *) stmt->funcexpr, context, showimplicit); + return; + } + + appendStringInfo(buf, "%s(", + generate_function_name(functionOid, numberOfArgs, + namedArgList, argumentTypes, + stmt->funcexpr->funcvariadic, + &use_variadic, + context->special_exprkind)); + int argNumber = 0; + foreach(argumentCell, finalArgumentList) + { + if (argNumber++ > 0) + appendStringInfoString(buf, ", "); + if (use_variadic && lnext(finalArgumentList, argumentCell) == NULL) + appendStringInfoString(buf, "VARIADIC "); + get_rule_expr((Node *) lfirst(argumentCell), context, true); + argNumber++; + } + + appendStringInfoChar(buf, ')'); +} + +/* + * get_agg_expr - Parse back an Aggref node + */ +static void +get_agg_expr(Aggref *aggref, deparse_context *context, + Aggref *original_aggref) +{ + get_agg_expr_helper(aggref, context, original_aggref, NULL, NULL, + false); +} + +/* + * get_agg_expr_helper - subroutine for get_agg_expr and + * get_json_agg_constructor + */ +static void +get_agg_expr_helper(Aggref *aggref, deparse_context *context, + Aggref *original_aggref, const char *funcname, + const char *options, bool is_json_objectagg) +{ + StringInfo buf = context->buf; + Oid argtypes[FUNC_MAX_ARGS]; + int nargs; + bool use_variadic = false; + + /* + * For a combining aggregate, we look up and deparse the corresponding + * partial aggregate instead. This is necessary because our input + * argument list has been replaced; the new argument list always has just + * one element, which will point to a partial Aggref that supplies us with + * transition states to combine. + */ + if (DO_AGGSPLIT_COMBINE(aggref->aggsplit)) + { + TargetEntry *tle; + + + Assert(list_length(aggref->args) == 1); + tle = linitial_node(TargetEntry, aggref->args); + resolve_special_varno((Node *) tle->expr, context, + get_agg_combine_expr, original_aggref); + return; + } + + /* + * Mark as PARTIAL, if appropriate. 
We look to the original aggref so as + * to avoid printing this when recursing from the code just above. + */ + if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit)) + appendStringInfoString(buf, "PARTIAL "); + + /* Extract the argument types as seen by the parser */ + nargs = get_aggregate_argtypes(aggref, argtypes); + + if (!funcname) + funcname = generate_function_name(aggref->aggfnoid, nargs, NIL, + argtypes, aggref->aggvariadic, + &use_variadic, + context->special_exprkind); + + /* Print the aggregate name, schema-qualified if needed */ + appendStringInfo(buf, "%s(%s", funcname, + (aggref->aggdistinct != NIL) ? "DISTINCT " : ""); + + if (AGGKIND_IS_ORDERED_SET(aggref->aggkind)) + { + /* + * Ordered-set aggregates do not use "*" syntax. Also, we needn't + * worry about inserting VARIADIC. So we can just dump the direct + * args as-is. + */ + Assert(!aggref->aggvariadic); + get_rule_expr((Node *) aggref->aggdirectargs, context, true); + Assert(aggref->aggorder != NIL); + appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY "); + get_rule_orderby(aggref->aggorder, aggref->args, false, context); + } + else + { + /* aggstar can be set only in zero-argument aggregates */ + if (aggref->aggstar) + appendStringInfoChar(buf, '*'); + else + { + ListCell *l; + int i; + + i = 0; + foreach(l, aggref->args) + { + TargetEntry *tle = (TargetEntry *) lfirst(l); + Node *arg = (Node *) tle->expr; + + Assert(!IsA(arg, NamedArgExpr)); + if (tle->resjunk) + continue; + if (i++ > 0) + { + if (is_json_objectagg) + { + /* + * the ABSENT ON NULL and WITH UNIQUE args are printed + * separately, so ignore them here + */ + if (i > 2) + break; + + appendStringInfoString(buf, " : "); + } + else + appendStringInfoString(buf, ", "); + } + if (use_variadic && i == nargs) + appendStringInfoString(buf, "VARIADIC "); + get_rule_expr(arg, context, true); + } + } + + if (aggref->aggorder != NIL) + { + appendStringInfoString(buf, " ORDER BY "); + get_rule_orderby(aggref->aggorder, aggref->args, false, context); + } + } + + if (options) + appendStringInfoString(buf, options); + + if (aggref->aggfilter != NULL) + { + appendStringInfoString(buf, ") FILTER (WHERE "); + get_rule_expr((Node *) aggref->aggfilter, context, false); + } + + appendStringInfoChar(buf, ')'); +} + +/* + * This is a helper function for get_agg_expr(). It's used when we deparse + * a combining Aggref; resolve_special_varno locates the corresponding partial + * Aggref and then calls this. 
+ */ +static void +get_agg_combine_expr(Node *node, deparse_context *context, void *callback_arg) +{ + Aggref *aggref; + Aggref *original_aggref = callback_arg; + + if (!IsA(node, Aggref)) + elog(ERROR, "combining Aggref does not point to an Aggref"); + + aggref = (Aggref *) node; + get_agg_expr(aggref, context, original_aggref); +} + +/* + * get_windowfunc_expr - Parse back a WindowFunc node + */ +static void +get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context) +{ + get_windowfunc_expr_helper(wfunc, context, NULL, NULL, false); +} + + +/* + * get_windowfunc_expr_helper - subroutine for get_windowfunc_expr and + * get_json_agg_constructor + */ +static void +get_windowfunc_expr_helper(WindowFunc *wfunc, deparse_context *context, + const char *funcname, const char *options, + bool is_json_objectagg) +{ + StringInfo buf = context->buf; + Oid argtypes[FUNC_MAX_ARGS]; + int nargs; + List *argnames; + ListCell *l; + + if (list_length(wfunc->args) > FUNC_MAX_ARGS) + ereport(ERROR, + (errcode(ERRCODE_TOO_MANY_ARGUMENTS), + errmsg("too many arguments"))); + nargs = 0; + argnames = NIL; + foreach(l, wfunc->args) + { + Node *arg = (Node *) lfirst(l); + + if (IsA(arg, NamedArgExpr)) + argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); + argtypes[nargs] = exprType(arg); + nargs++; + } + + if (!funcname) + funcname = generate_function_name(wfunc->winfnoid, nargs, argnames, + argtypes, false, NULL, + context->special_exprkind); + + appendStringInfo(buf, "%s(", funcname); + + /* winstar can be set only in zero-argument aggregates */ + if (wfunc->winstar) + appendStringInfoChar(buf, '*'); + else + { + if (is_json_objectagg) + { + get_rule_expr((Node *) linitial(wfunc->args), context, false); + appendStringInfoString(buf, " : "); + get_rule_expr((Node *) lsecond(wfunc->args), context, false); + } + else + get_rule_expr((Node *) wfunc->args, context, true); + } + + if (options) + appendStringInfoString(buf, options); + + if (wfunc->aggfilter != NULL) + { + appendStringInfoString(buf, ") FILTER (WHERE "); + get_rule_expr((Node *) wfunc->aggfilter, context, false); + } + + appendStringInfoString(buf, ") OVER "); + + foreach(l, context->windowClause) + { + WindowClause *wc = (WindowClause *) lfirst(l); + + if (wc->winref == wfunc->winref) + { + if (wc->name) + appendStringInfoString(buf, quote_identifier(wc->name)); + else + get_rule_windowspec(wc, context->windowTList, context); + break; + } + } + if (l == NULL) + { + if (context->windowClause) + elog(ERROR, "could not find window clause for winref %u", + wfunc->winref); + + /* + * In EXPLAIN, we don't have window context information available, so + * we have to settle for this: + */ + appendStringInfoString(buf, "(?)"); + } +} + +/* + * get_func_sql_syntax - Parse back a SQL-syntax function call + * + * Returns true if we successfully deparsed, false if we did not + * recognize the function. + */ +static bool +get_func_sql_syntax(FuncExpr *expr, deparse_context *context) +{ + StringInfo buf = context->buf; + Oid funcoid = expr->funcid; + + switch (funcoid) + { + case F_TIMEZONE_INTERVAL_TIMESTAMP: + case F_TIMEZONE_INTERVAL_TIMESTAMPTZ: + case F_TIMEZONE_INTERVAL_TIMETZ: + case F_TIMEZONE_TEXT_TIMESTAMP: + case F_TIMEZONE_TEXT_TIMESTAMPTZ: + case F_TIMEZONE_TEXT_TIMETZ: + /* AT TIME ZONE ... 
note reversed argument order */ + appendStringInfoChar(buf, '('); + get_rule_expr_paren((Node *) lsecond(expr->args), context, false, + (Node *) expr); + appendStringInfoString(buf, " AT TIME ZONE "); + get_rule_expr_paren((Node *) linitial(expr->args), context, false, + (Node *) expr); + appendStringInfoChar(buf, ')'); + return true; + + case F_TIMEZONE_TIMESTAMP: + case F_TIMEZONE_TIMESTAMPTZ: + case F_TIMEZONE_TIMETZ: + /* AT LOCAL */ + appendStringInfoChar(buf, '('); + get_rule_expr_paren((Node *) linitial(expr->args), context, false, + (Node *) expr); + appendStringInfoString(buf, " AT LOCAL)"); + return true; + + case F_OVERLAPS_TIMESTAMPTZ_INTERVAL_TIMESTAMPTZ_INTERVAL: + case F_OVERLAPS_TIMESTAMPTZ_INTERVAL_TIMESTAMPTZ_TIMESTAMPTZ: + case F_OVERLAPS_TIMESTAMPTZ_TIMESTAMPTZ_TIMESTAMPTZ_INTERVAL: + case F_OVERLAPS_TIMESTAMPTZ_TIMESTAMPTZ_TIMESTAMPTZ_TIMESTAMPTZ: + case F_OVERLAPS_TIMESTAMP_INTERVAL_TIMESTAMP_INTERVAL: + case F_OVERLAPS_TIMESTAMP_INTERVAL_TIMESTAMP_TIMESTAMP: + case F_OVERLAPS_TIMESTAMP_TIMESTAMP_TIMESTAMP_INTERVAL: + case F_OVERLAPS_TIMESTAMP_TIMESTAMP_TIMESTAMP_TIMESTAMP: + case F_OVERLAPS_TIMETZ_TIMETZ_TIMETZ_TIMETZ: + case F_OVERLAPS_TIME_INTERVAL_TIME_INTERVAL: + case F_OVERLAPS_TIME_INTERVAL_TIME_TIME: + case F_OVERLAPS_TIME_TIME_TIME_INTERVAL: + case F_OVERLAPS_TIME_TIME_TIME_TIME: + /* (x1, x2) OVERLAPS (y1, y2) */ + appendStringInfoString(buf, "(("); + get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoString(buf, ", "); + get_rule_expr((Node *) lsecond(expr->args), context, false); + appendStringInfoString(buf, ") OVERLAPS ("); + get_rule_expr((Node *) lthird(expr->args), context, false); + appendStringInfoString(buf, ", "); + get_rule_expr((Node *) lfourth(expr->args), context, false); + appendStringInfoString(buf, "))"); + return true; + + case F_EXTRACT_TEXT_DATE: + case F_EXTRACT_TEXT_TIME: + case F_EXTRACT_TEXT_TIMETZ: + case F_EXTRACT_TEXT_TIMESTAMP: + case F_EXTRACT_TEXT_TIMESTAMPTZ: + case F_EXTRACT_TEXT_INTERVAL: + /* EXTRACT (x FROM y) */ + appendStringInfoString(buf, "EXTRACT("); + { + Const *con = (Const *) linitial(expr->args); + + Assert(IsA(con, Const) && + con->consttype == TEXTOID && + !con->constisnull); + appendStringInfoString(buf, TextDatumGetCString(con->constvalue)); + } + appendStringInfoString(buf, " FROM "); + get_rule_expr((Node *) lsecond(expr->args), context, false); + appendStringInfoChar(buf, ')'); + return true; + + case F_IS_NORMALIZED: + /* IS xxx NORMALIZED */ + appendStringInfoChar(buf, '('); + get_rule_expr_paren((Node *) linitial(expr->args), context, false, + (Node *) expr); + appendStringInfoString(buf, " IS"); + if (list_length(expr->args) == 2) + { + Const *con = (Const *) lsecond(expr->args); + + Assert(IsA(con, Const) && + con->consttype == TEXTOID && + !con->constisnull); + appendStringInfo(buf, " %s", + TextDatumGetCString(con->constvalue)); + } + appendStringInfoString(buf, " NORMALIZED)"); + return true; + + case F_PG_COLLATION_FOR: + /* COLLATION FOR */ + appendStringInfoString(buf, "COLLATION FOR ("); + get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoChar(buf, ')'); + return true; + + case F_NORMALIZE: + /* NORMALIZE() */ + appendStringInfoString(buf, "NORMALIZE("); + get_rule_expr((Node *) linitial(expr->args), context, false); + if (list_length(expr->args) == 2) + { + Const *con = (Const *) lsecond(expr->args); + + Assert(IsA(con, Const) && + con->consttype == TEXTOID && + !con->constisnull); + appendStringInfo(buf, ", %s", + 
TextDatumGetCString(con->constvalue)); + } + appendStringInfoChar(buf, ')'); + return true; + + case F_OVERLAY_BIT_BIT_INT4: + case F_OVERLAY_BIT_BIT_INT4_INT4: + case F_OVERLAY_BYTEA_BYTEA_INT4: + case F_OVERLAY_BYTEA_BYTEA_INT4_INT4: + case F_OVERLAY_TEXT_TEXT_INT4: + case F_OVERLAY_TEXT_TEXT_INT4_INT4: + /* OVERLAY() */ + appendStringInfoString(buf, "OVERLAY("); + get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoString(buf, " PLACING "); + get_rule_expr((Node *) lsecond(expr->args), context, false); + appendStringInfoString(buf, " FROM "); + get_rule_expr((Node *) lthird(expr->args), context, false); + if (list_length(expr->args) == 4) + { + appendStringInfoString(buf, " FOR "); + get_rule_expr((Node *) lfourth(expr->args), context, false); + } + appendStringInfoChar(buf, ')'); + return true; + + case F_POSITION_BIT_BIT: + case F_POSITION_BYTEA_BYTEA: + case F_POSITION_TEXT_TEXT: + /* POSITION() ... extra parens since args are b_expr not a_expr */ + appendStringInfoString(buf, "POSITION(("); + get_rule_expr((Node *) lsecond(expr->args), context, false); + appendStringInfoString(buf, ") IN ("); + get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoString(buf, "))"); + return true; + + case F_SUBSTRING_BIT_INT4: + case F_SUBSTRING_BIT_INT4_INT4: + case F_SUBSTRING_BYTEA_INT4: + case F_SUBSTRING_BYTEA_INT4_INT4: + case F_SUBSTRING_TEXT_INT4: + case F_SUBSTRING_TEXT_INT4_INT4: + /* SUBSTRING FROM/FOR (i.e., integer-position variants) */ + appendStringInfoString(buf, "SUBSTRING("); + get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoString(buf, " FROM "); + get_rule_expr((Node *) lsecond(expr->args), context, false); + if (list_length(expr->args) == 3) + { + appendStringInfoString(buf, " FOR "); + get_rule_expr((Node *) lthird(expr->args), context, false); + } + appendStringInfoChar(buf, ')'); + return true; + + case F_SUBSTRING_TEXT_TEXT_TEXT: + /* SUBSTRING SIMILAR/ESCAPE */ + appendStringInfoString(buf, "SUBSTRING("); + get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoString(buf, " SIMILAR "); + get_rule_expr((Node *) lsecond(expr->args), context, false); + appendStringInfoString(buf, " ESCAPE "); + get_rule_expr((Node *) lthird(expr->args), context, false); + appendStringInfoChar(buf, ')'); + return true; + + case F_BTRIM_BYTEA_BYTEA: + case F_BTRIM_TEXT: + case F_BTRIM_TEXT_TEXT: + /* TRIM() */ + appendStringInfoString(buf, "TRIM(BOTH"); + if (list_length(expr->args) == 2) + { + appendStringInfoChar(buf, ' '); + get_rule_expr((Node *) lsecond(expr->args), context, false); + } + appendStringInfoString(buf, " FROM "); + get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoChar(buf, ')'); + return true; + + case F_LTRIM_BYTEA_BYTEA: + case F_LTRIM_TEXT: + case F_LTRIM_TEXT_TEXT: + /* TRIM() */ + appendStringInfoString(buf, "TRIM(LEADING"); + if (list_length(expr->args) == 2) + { + appendStringInfoChar(buf, ' '); + get_rule_expr((Node *) lsecond(expr->args), context, false); + } + appendStringInfoString(buf, " FROM "); + get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoChar(buf, ')'); + return true; + + case F_RTRIM_BYTEA_BYTEA: + case F_RTRIM_TEXT: + case F_RTRIM_TEXT_TEXT: + /* TRIM() */ + appendStringInfoString(buf, "TRIM(TRAILING"); + if (list_length(expr->args) == 2) + { + appendStringInfoChar(buf, ' '); + get_rule_expr((Node *) lsecond(expr->args), context, false); + } + appendStringInfoString(buf, " FROM "); 
+ get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoChar(buf, ')'); + return true; + + case F_SYSTEM_USER: + appendStringInfoString(buf, "SYSTEM_USER"); + return true; + + case F_XMLEXISTS: + /* XMLEXISTS ... extra parens because args are c_expr */ + appendStringInfoString(buf, "XMLEXISTS(("); + get_rule_expr((Node *) linitial(expr->args), context, false); + appendStringInfoString(buf, ") PASSING ("); + get_rule_expr((Node *) lsecond(expr->args), context, false); + appendStringInfoString(buf, "))"); + return true; + } + return false; +} + +/* ---------- + * get_coercion_expr + * + * Make a string representation of a value coerced to a specific type + * ---------- + */ +static void +get_coercion_expr(Node *arg, deparse_context *context, + Oid resulttype, int32 resulttypmod, + Node *parentNode) +{ + StringInfo buf = context->buf; + + /* + * Since parse_coerce.c doesn't immediately collapse application of + * length-coercion functions to constants, what we'll typically see in + * such cases is a Const with typmod -1 and a length-coercion function + * right above it. Avoid generating redundant output. However, beware of + * suppressing casts when the user actually wrote something like + * 'foo'::text::char(3). + * + * Note: it might seem that we are missing the possibility of needing to + * print a COLLATE clause for such a Const. However, a Const could only + * have nondefault collation in a post-constant-folding tree, in which the + * length coercion would have been folded too. See also the special + * handling of CollateExpr in coerce_to_target_type(): any collation + * marking will be above the coercion node, not below it. + */ + if (arg && IsA(arg, Const) && + ((Const *) arg)->consttype == resulttype && + ((Const *) arg)->consttypmod == -1) + { + /* Show the constant without normal ::typename decoration */ + get_const_expr((Const *) arg, context, -1); + } + else + { + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr_paren(arg, context, false, parentNode); + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + } + appendStringInfo(buf, "::%s", + format_type_with_typemod(resulttype, resulttypmod)); +} + +/* ---------- + * get_const_expr + * + * Make a string representation of a Const + * + * showtype can be -1 to never show "::typename" decoration, or +1 to always + * show it, or 0 to show it only if the constant wouldn't be assumed to be + * the right type by default. + * + * If the Const's collation isn't default for its type, show that too. + * We mustn't do this when showtype is -1 (since that means the caller will + * print "::typename", and we can't put a COLLATE clause in between). It's + * caller's responsibility that collation isn't missed in such cases. + * ---------- + */ +static void +get_const_expr(Const *constval, deparse_context *context, int showtype) +{ + StringInfo buf = context->buf; + Oid typoutput; + bool typIsVarlena; + char *extval; + bool needlabel = false; + + if (constval->constisnull) + { + /* + * Always label the type of a NULL constant to prevent misdecisions + * about type when reparsing. 
+ */ + appendStringInfoString(buf, "NULL"); + if (showtype >= 0) + { + appendStringInfo(buf, "::%s", + format_type_with_typemod(constval->consttype, + constval->consttypmod)); + get_const_collation(constval, context); + } + return; + } + + getTypeOutputInfo(constval->consttype, + &typoutput, &typIsVarlena); + + extval = OidOutputFunctionCall(typoutput, constval->constvalue); + + switch (constval->consttype) + { + case INT4OID: + + /* + * INT4 can be printed without any decoration, unless it is + * negative; in that case print it as '-nnn'::integer to ensure + * that the output will re-parse as a constant, not as a constant + * plus operator. In most cases we could get away with printing + * (-nnn) instead, because of the way that gram.y handles negative + * literals; but that doesn't work for INT_MIN, and it doesn't + * seem that much prettier anyway. + */ + if (extval[0] != '-') + appendStringInfoString(buf, extval); + else + { + appendStringInfo(buf, "'%s'", extval); + needlabel = true; /* we must attach a cast */ + } + break; + + case NUMERICOID: + + /* + * NUMERIC can be printed without quotes if it looks like a float + * constant (not an integer, and not Infinity or NaN) and doesn't + * have a leading sign (for the same reason as for INT4). + */ + if (isdigit((unsigned char) extval[0]) && + strcspn(extval, "eE.") != strlen(extval)) + { + appendStringInfoString(buf, extval); + } + else + { + appendStringInfo(buf, "'%s'", extval); + needlabel = true; /* we must attach a cast */ + } + break; + + case BITOID: + case VARBITOID: + appendStringInfo(buf, "B'%s'", extval); + break; + + case BOOLOID: + if (strcmp(extval, "t") == 0) + appendStringInfoString(buf, "true"); + else + appendStringInfoString(buf, "false"); + break; + + default: + simple_quote_literal(buf, extval); + break; + } + + pfree(extval); + + if (showtype < 0) + return; + + /* + * For showtype == 0, append ::typename unless the constant will be + * implicitly typed as the right type when it is read in. + * + * XXX this code has to be kept in sync with the behavior of the parser, + * especially make_const. + */ + switch (constval->consttype) + { + case BOOLOID: + case UNKNOWNOID: + /* These types can be left unlabeled */ + needlabel = false; + break; + case INT4OID: + /* We determined above whether a label is needed */ + break; + case NUMERICOID: + + /* + * Float-looking constants will be typed as numeric, which we + * checked above; but if there's a nondefault typmod we need to + * show it. 
+ */ + needlabel |= (constval->consttypmod >= 0); + break; + default: + needlabel = true; + break; + } + if (needlabel || showtype > 0) + appendStringInfo(buf, "::%s", + format_type_with_typemod(constval->consttype, + constval->consttypmod)); + + get_const_collation(constval, context); +} + +/* + * helper for get_const_expr: append COLLATE if needed + */ +static void +get_const_collation(Const *constval, deparse_context *context) +{ + StringInfo buf = context->buf; + + if (OidIsValid(constval->constcollid)) + { + Oid typcollation = get_typcollation(constval->consttype); + + if (constval->constcollid != typcollation) + { + appendStringInfo(buf, " COLLATE %s", + generate_collation_name(constval->constcollid)); + } + } +} + +/* + * get_json_path_spec - Parse back a JSON path specification + */ +static void +get_json_path_spec(Node *path_spec, deparse_context *context, bool showimplicit) +{ + if (IsA(path_spec, Const)) + get_const_expr((Const *) path_spec, context, -1); + else + get_rule_expr(path_spec, context, showimplicit); +} + +/* + * get_json_format - Parse back a JsonFormat node + */ +static void +get_json_format(JsonFormat *format, StringInfo buf) +{ + if (format->format_type == JS_FORMAT_DEFAULT) + return; + + appendStringInfoString(buf, + format->format_type == JS_FORMAT_JSONB ? + " FORMAT JSONB" : " FORMAT JSON"); + + if (format->encoding != JS_ENC_DEFAULT) + { + const char *encoding; + + encoding = + format->encoding == JS_ENC_UTF16 ? "UTF16" : + format->encoding == JS_ENC_UTF32 ? "UTF32" : "UTF8"; + + appendStringInfo(buf, " ENCODING %s", encoding); + } +} + +/* + * get_json_returning - Parse back a JsonReturning structure + */ +static void +get_json_returning(JsonReturning *returning, StringInfo buf, + bool json_format_by_default) +{ + if (!OidIsValid(returning->typid)) + return; + + appendStringInfo(buf, " RETURNING %s", + format_type_with_typemod(returning->typid, + returning->typmod)); + + if (!json_format_by_default || + returning->format->format_type != + (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON)) + get_json_format(returning->format, buf); +} + +/* + * get_json_constructor - Parse back a JsonConstructorExpr node + */ +static void +get_json_constructor(JsonConstructorExpr *ctor, deparse_context *context, + bool showimplicit) +{ + StringInfo buf = context->buf; + const char *funcname; + bool is_json_object; + int curridx; + ListCell *lc; + + if (ctor->type == JSCTOR_JSON_OBJECTAGG) + { + get_json_agg_constructor(ctor, context, "JSON_OBJECTAGG", true); + return; + } + else if (ctor->type == JSCTOR_JSON_ARRAYAGG) + { + get_json_agg_constructor(ctor, context, "JSON_ARRAYAGG", false); + return; + } + + switch (ctor->type) + { + case JSCTOR_JSON_OBJECT: + funcname = "JSON_OBJECT"; + break; + case JSCTOR_JSON_ARRAY: + funcname = "JSON_ARRAY"; + break; + case JSCTOR_JSON_PARSE: + funcname = "JSON"; + break; + case JSCTOR_JSON_SCALAR: + funcname = "JSON_SCALAR"; + break; + case JSCTOR_JSON_SERIALIZE: + funcname = "JSON_SERIALIZE"; + break; + default: + elog(ERROR, "invalid JsonConstructorType %d", ctor->type); + } + + appendStringInfo(buf, "%s(", funcname); + + is_json_object = ctor->type == JSCTOR_JSON_OBJECT; + foreach(lc, ctor->args) + { + curridx = foreach_current_index(lc); + if (curridx > 0) + { + const char *sep; + + sep = (is_json_object && (curridx % 2) != 0) ? 
" : " : ", "; + appendStringInfoString(buf, sep); + } + + get_rule_expr((Node *) lfirst(lc), context, true); + } + + get_json_constructor_options(ctor, buf); + appendStringInfoChar(buf, ')'); +} + +/* + * Append options, if any, to the JSON constructor being deparsed + */ +static void +get_json_constructor_options(JsonConstructorExpr *ctor, StringInfo buf) +{ + if (ctor->absent_on_null) + { + if (ctor->type == JSCTOR_JSON_OBJECT || + ctor->type == JSCTOR_JSON_OBJECTAGG) + appendStringInfoString(buf, " ABSENT ON NULL"); + } + else + { + if (ctor->type == JSCTOR_JSON_ARRAY || + ctor->type == JSCTOR_JSON_ARRAYAGG) + appendStringInfoString(buf, " NULL ON NULL"); + } + + if (ctor->unique) + appendStringInfoString(buf, " WITH UNIQUE KEYS"); + + /* + * Append RETURNING clause if needed; JSON() and JSON_SCALAR() don't + * support one. + */ + if (ctor->type != JSCTOR_JSON_PARSE && ctor->type != JSCTOR_JSON_SCALAR) + get_json_returning(ctor->returning, buf, true); +} + +/* + * get_json_agg_constructor - Parse back an aggregate JsonConstructorExpr node + */ +static void +get_json_agg_constructor(JsonConstructorExpr *ctor, deparse_context *context, + const char *funcname, bool is_json_objectagg) +{ + StringInfoData options; + + initStringInfo(&options); + get_json_constructor_options(ctor, &options); + + if (IsA(ctor->func, Aggref)) + get_agg_expr_helper((Aggref *) ctor->func, context, + (Aggref *) ctor->func, + funcname, options.data, is_json_objectagg); + else if (IsA(ctor->func, WindowFunc)) + get_windowfunc_expr_helper((WindowFunc *) ctor->func, context, + funcname, options.data, + is_json_objectagg); + else + elog(ERROR, "invalid JsonConstructorExpr underlying node type: %d", + nodeTag(ctor->func)); +} + +/* + * simple_quote_literal - Format a string as a SQL literal, append to buf + */ +static void +simple_quote_literal(StringInfo buf, const char *val) +{ + const char *valptr; + + /* + * We form the string literal according to the prevailing setting of + * standard_conforming_strings; we never use E''. User is responsible for + * making sure result is used correctly. + */ + appendStringInfoChar(buf, '\''); + for (valptr = val; *valptr; valptr++) + { + char ch = *valptr; + + if (SQL_STR_DOUBLE(ch, !standard_conforming_strings)) + appendStringInfoChar(buf, ch); + appendStringInfoChar(buf, ch); + } + appendStringInfoChar(buf, '\''); +} + +/* ---------- + * get_sublink_expr - Parse back a sublink + * ---------- + */ +static void +get_sublink_expr(SubLink *sublink, deparse_context *context) +{ + StringInfo buf = context->buf; + Query *query = (Query *) (sublink->subselect); + char *opname = NULL; + bool need_paren; + + if (sublink->subLinkType == ARRAY_SUBLINK) + appendStringInfoString(buf, "ARRAY("); + else + appendStringInfoChar(buf, '('); + + /* + * Note that we print the name of only the first operator, when there are + * multiple combining operators. This is an approximation that could go + * wrong in various scenarios (operators in different schemas, renamed + * operators, etc) but there is not a whole lot we can do about it, since + * the syntax allows only one operator to be shown. 
+ */ + if (sublink->testexpr) + { + if (IsA(sublink->testexpr, OpExpr)) + { + /* single combining operator */ + OpExpr *opexpr = (OpExpr *) sublink->testexpr; + + get_rule_expr(linitial(opexpr->args), context, true); + opname = generate_operator_name(opexpr->opno, + exprType(linitial(opexpr->args)), + exprType(lsecond(opexpr->args))); + } + else if (IsA(sublink->testexpr, BoolExpr)) + { + /* multiple combining operators, = or <> cases */ + char *sep; + ListCell *l; + + appendStringInfoChar(buf, '('); + sep = ""; + foreach(l, ((BoolExpr *) sublink->testexpr)->args) + { + OpExpr *opexpr = lfirst_node(OpExpr, l); + + appendStringInfoString(buf, sep); + get_rule_expr(linitial(opexpr->args), context, true); + if (!opname) + opname = generate_operator_name(opexpr->opno, + exprType(linitial(opexpr->args)), + exprType(lsecond(opexpr->args))); + sep = ", "; + } + appendStringInfoChar(buf, ')'); + } + else if (IsA(sublink->testexpr, RowCompareExpr)) + { + /* multiple combining operators, < <= > >= cases */ + RowCompareExpr *rcexpr = (RowCompareExpr *) sublink->testexpr; + + appendStringInfoChar(buf, '('); + get_rule_expr((Node *) rcexpr->largs, context, true); + opname = generate_operator_name(linitial_oid(rcexpr->opnos), + exprType(linitial(rcexpr->largs)), + exprType(linitial(rcexpr->rargs))); + appendStringInfoChar(buf, ')'); + } + else + elog(ERROR, "unrecognized testexpr type: %d", + (int) nodeTag(sublink->testexpr)); + } + + need_paren = true; + + switch (sublink->subLinkType) + { + case EXISTS_SUBLINK: + appendStringInfoString(buf, "EXISTS "); + break; + + case ANY_SUBLINK: + if (strcmp(opname, "=") == 0) /* Represent = ANY as IN */ + appendStringInfoString(buf, " IN "); + else + appendStringInfo(buf, " %s ANY ", opname); + break; + + case ALL_SUBLINK: + appendStringInfo(buf, " %s ALL ", opname); + break; + + case ROWCOMPARE_SUBLINK: + appendStringInfo(buf, " %s ", opname); + break; + + case EXPR_SUBLINK: + case MULTIEXPR_SUBLINK: + case ARRAY_SUBLINK: + need_paren = false; + break; + + case CTE_SUBLINK: /* shouldn't occur in a SubLink */ + default: + elog(ERROR, "unrecognized sublink type: %d", + (int) sublink->subLinkType); + break; + } + + if (need_paren) + appendStringInfoChar(buf, '('); + + get_query_def(query, buf, context->namespaces, NULL, false, + context->prettyFlags, context->wrapColumn, + context->indentLevel); + + if (need_paren) + appendStringInfoString(buf, "))"); + else + appendStringInfoChar(buf, ')'); +} + +/* ---------- + * get_xmltable - Parse back a XMLTABLE function + * ---------- + */ +static void +get_xmltable(TableFunc *tf, deparse_context *context, bool showimplicit) +{ + StringInfo buf = context->buf; + + appendStringInfoString(buf, "XMLTABLE("); + + if (tf->ns_uris != NIL) + { + ListCell *lc1, + *lc2; + bool first = true; + + appendStringInfoString(buf, "XMLNAMESPACES ("); + forboth(lc1, tf->ns_uris, lc2, tf->ns_names) + { + Node *expr = (Node *) lfirst(lc1); + char *name = strVal(lfirst(lc2)); + + if (!first) + appendStringInfoString(buf, ", "); + else + first = false; + + if (name != NULL) + { + get_rule_expr(expr, context, showimplicit); + appendStringInfo(buf, " AS %s", name); + } + else + { + appendStringInfoString(buf, "DEFAULT "); + get_rule_expr(expr, context, showimplicit); + } + } + appendStringInfoString(buf, "), "); + } + + appendStringInfoChar(buf, '('); + get_rule_expr((Node *) tf->rowexpr, context, showimplicit); + appendStringInfoString(buf, ") PASSING ("); + get_rule_expr((Node *) tf->docexpr, context, showimplicit); + appendStringInfoChar(buf, 
')'); + + if (tf->colexprs != NIL) + { + ListCell *l1; + ListCell *l2; + ListCell *l3; + ListCell *l4; + ListCell *l5; + int colnum = 0; + + appendStringInfoString(buf, " COLUMNS "); + forfive(l1, tf->colnames, l2, tf->coltypes, l3, tf->coltypmods, + l4, tf->colexprs, l5, tf->coldefexprs) + { + char *colname = strVal(lfirst(l1)); + Oid typid = lfirst_oid(l2); + int32 typmod = lfirst_int(l3); + Node *colexpr = (Node *) lfirst(l4); + Node *coldefexpr = (Node *) lfirst(l5); + bool ordinality = (tf->ordinalitycol == colnum); + bool notnull = bms_is_member(colnum, tf->notnulls); + + if (colnum > 0) + appendStringInfoString(buf, ", "); + colnum++; + + appendStringInfo(buf, "%s %s", quote_identifier(colname), + ordinality ? "FOR ORDINALITY" : + format_type_with_typemod(typid, typmod)); + if (ordinality) + continue; + + if (coldefexpr != NULL) + { + appendStringInfoString(buf, " DEFAULT ("); + get_rule_expr((Node *) coldefexpr, context, showimplicit); + appendStringInfoChar(buf, ')'); + } + if (colexpr != NULL) + { + appendStringInfoString(buf, " PATH ("); + get_rule_expr((Node *) colexpr, context, showimplicit); + appendStringInfoChar(buf, ')'); + } + if (notnull) + appendStringInfoString(buf, " NOT NULL"); + } + } + + appendStringInfoChar(buf, ')'); +} + +/* + * get_json_table_nested_columns - Parse back nested JSON_TABLE columns + */ +static void +get_json_table_nested_columns(TableFunc *tf, JsonTablePlan *plan, + deparse_context *context, bool showimplicit, + bool needcomma) +{ + if (IsA(plan, JsonTablePathScan)) + { + JsonTablePathScan *scan = castNode(JsonTablePathScan, plan); + + if (needcomma) + appendStringInfoChar(context->buf, ','); + + appendStringInfoChar(context->buf, ' '); + appendContextKeyword(context, "NESTED PATH ", 0, 0, 0); + get_const_expr(scan->path->value, context, -1); + appendStringInfo(context->buf, " AS %s", quote_identifier(scan->path->name)); + get_json_table_columns(tf, scan, context, showimplicit); + } + else if (IsA(plan, JsonTableSiblingJoin)) + { + JsonTableSiblingJoin *join = (JsonTableSiblingJoin *) plan; + + get_json_table_nested_columns(tf, join->lplan, context, showimplicit, + needcomma); + get_json_table_nested_columns(tf, join->rplan, context, showimplicit, + true); + } +} + +/* + * get_json_table_columns - Parse back JSON_TABLE columns + */ +static void +get_json_table_columns(TableFunc *tf, JsonTablePathScan *scan, + deparse_context *context, + bool showimplicit) +{ + StringInfo buf = context->buf; + JsonExpr *jexpr = castNode(JsonExpr, tf->docexpr); + ListCell *lc_colname; + ListCell *lc_coltype; + ListCell *lc_coltypmod; + ListCell *lc_colvalexpr; + int colnum = 0; + + appendStringInfoChar(buf, ' '); + appendContextKeyword(context, "COLUMNS (", 0, 0, 0); + + if (PRETTY_INDENT(context)) + context->indentLevel += PRETTYINDENT_VAR; + + forfour(lc_colname, tf->colnames, + lc_coltype, tf->coltypes, + lc_coltypmod, tf->coltypmods, + lc_colvalexpr, tf->colvalexprs) + { + char *colname = strVal(lfirst(lc_colname)); + JsonExpr *colexpr; + Oid typid; + int32 typmod; + bool ordinality; + JsonBehaviorType default_behavior; + + typid = lfirst_oid(lc_coltype); + typmod = lfirst_int(lc_coltypmod); + colexpr = castNode(JsonExpr, lfirst(lc_colvalexpr)); + + /* Skip columns that don't belong to this scan. 
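+ * Only the columns in this scan's colMin..colMax range are emitted here;
+ * the remaining ones are deparsed by other path scans (e.g. under
+ * NESTED PATH).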
*/ + if (scan->colMin < 0 || colnum < scan->colMin) + { + colnum++; + continue; + } + if (colnum > scan->colMax) + break; + + if (colnum > scan->colMin) + appendStringInfoString(buf, ", "); + + colnum++; + + ordinality = !colexpr; + + appendContextKeyword(context, "", 0, 0, 0); + + appendStringInfo(buf, "%s %s", quote_identifier(colname), + ordinality ? "FOR ORDINALITY" : + format_type_with_typemod(typid, typmod)); + if (ordinality) + continue; + + if (colexpr->op == JSON_EXISTS_OP) + { + appendStringInfoString(buf, " EXISTS"); + default_behavior = JSON_BEHAVIOR_FALSE; + } + else + { + if (colexpr->op == JSON_QUERY_OP) + { + char typcategory; + bool typispreferred; + + get_type_category_preferred(typid, &typcategory, &typispreferred); + + if (typcategory == TYPCATEGORY_STRING) + appendStringInfoString(buf, + colexpr->format->format_type == JS_FORMAT_JSONB ? + " FORMAT JSONB" : " FORMAT JSON"); + } + + default_behavior = JSON_BEHAVIOR_NULL; + } + + if (jexpr->on_error->btype == JSON_BEHAVIOR_ERROR) + default_behavior = JSON_BEHAVIOR_ERROR; + + appendStringInfoString(buf, " PATH "); + + get_json_path_spec(colexpr->path_spec, context, showimplicit); + + get_json_expr_options(colexpr, context, default_behavior); + } + + if (scan->child) + get_json_table_nested_columns(tf, scan->child, context, showimplicit, + scan->colMin >= 0); + + if (PRETTY_INDENT(context)) + context->indentLevel -= PRETTYINDENT_VAR; + + appendContextKeyword(context, ")", 0, 0, 0); +} + +/* ---------- + * get_json_table - Parse back a JSON_TABLE function + * ---------- + */ +static void +get_json_table(TableFunc *tf, deparse_context *context, bool showimplicit) +{ + StringInfo buf = context->buf; + JsonExpr *jexpr = castNode(JsonExpr, tf->docexpr); + JsonTablePathScan *root = castNode(JsonTablePathScan, tf->plan); + + appendStringInfoString(buf, "JSON_TABLE("); + + if (PRETTY_INDENT(context)) + context->indentLevel += PRETTYINDENT_VAR; + + appendContextKeyword(context, "", 0, 0, 0); + + get_rule_expr(jexpr->formatted_expr, context, showimplicit); + + appendStringInfoString(buf, ", "); + + get_const_expr(root->path->value, context, -1); + + appendStringInfo(buf, " AS %s", quote_identifier(root->path->name)); + + if (jexpr->passing_values) + { + ListCell *lc1, + *lc2; + bool needcomma = false; + + appendStringInfoChar(buf, ' '); + appendContextKeyword(context, "PASSING ", 0, 0, 0); + + if (PRETTY_INDENT(context)) + context->indentLevel += PRETTYINDENT_VAR; + + forboth(lc1, jexpr->passing_names, + lc2, jexpr->passing_values) + { + if (needcomma) + appendStringInfoString(buf, ", "); + needcomma = true; + + appendContextKeyword(context, "", 0, 0, 0); + + get_rule_expr((Node *) lfirst(lc2), context, false); + appendStringInfo(buf, " AS %s", + quote_identifier((lfirst_node(String, lc1))->sval) + ); + } + + if (PRETTY_INDENT(context)) + context->indentLevel -= PRETTYINDENT_VAR; + } + + get_json_table_columns(tf, castNode(JsonTablePathScan, tf->plan), context, + showimplicit); + + if (jexpr->on_error->btype != JSON_BEHAVIOR_EMPTY) + get_json_behavior(jexpr->on_error, context, "ERROR"); + + if (PRETTY_INDENT(context)) + context->indentLevel -= PRETTYINDENT_VAR; + + appendContextKeyword(context, ")", 0, 0, 0); +} + +/* ---------- + * get_tablefunc - Parse back a table function + * ---------- + */ +static void +get_tablefunc(TableFunc *tf, deparse_context *context, bool showimplicit) +{ + /* XMLTABLE and JSON_TABLE are the only existing implementations. 
*/ + + if (tf->functype == TFT_XMLTABLE) + get_xmltable(tf, context, showimplicit); + else if (tf->functype == TFT_JSON_TABLE) + get_json_table(tf, context, showimplicit); +} + +/* ---------- + * get_from_clause - Parse back a FROM clause + * + * "prefix" is the keyword that denotes the start of the list of FROM + * elements. It is FROM when used to parse back SELECT and UPDATE, but + * is USING when parsing back DELETE. + * ---------- + */ +static void +get_from_clause(Query *query, const char *prefix, deparse_context *context) +{ + StringInfo buf = context->buf; + bool first = true; + ListCell *l; + + /* + * We use the query's jointree as a guide to what to print. However, we + * must ignore auto-added RTEs that are marked not inFromCl. (These can + * only appear at the top level of the jointree, so it's sufficient to + * check here.) This check also ensures we ignore the rule pseudo-RTEs + * for NEW and OLD. + */ + foreach(l, query->jointree->fromlist) + { + Node *jtnode = (Node *) lfirst(l); + + if (IsA(jtnode, RangeTblRef)) + { + int varno = ((RangeTblRef *) jtnode)->rtindex; + RangeTblEntry *rte = rt_fetch(varno, query->rtable); + + if (!rte->inFromCl) + continue; + } + + if (first) + { + appendContextKeyword(context, prefix, + -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); + first = false; + + get_from_clause_item(jtnode, query, context); + } + else + { + StringInfoData itembuf; + + appendStringInfoString(buf, ", "); + + /* + * Put the new FROM item's text into itembuf so we can decide + * after we've got it whether or not it needs to go on a new line. + */ + initStringInfo(&itembuf); + context->buf = &itembuf; + + get_from_clause_item(jtnode, query, context); + + /* Restore context's output buffer */ + context->buf = buf; + + /* Consider line-wrapping if enabled */ + if (PRETTY_INDENT(context) && context->wrapColumn >= 0) + { + /* Does the new item start with a new line? */ + if (itembuf.len > 0 && itembuf.data[0] == '\n') + { + /* If so, we shouldn't add anything */ + /* instead, remove any trailing spaces currently in buf */ + removeStringInfoSpaces(buf); + } + else + { + char *trailing_nl; + + /* Locate the start of the current line in the buffer */ + trailing_nl = strrchr(buf->data, '\n'); + if (trailing_nl == NULL) + trailing_nl = buf->data; + else + trailing_nl++; + + /* + * Add a newline, plus some indentation, if the new item + * would cause an overflow. 
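+ * (That is, when the current output line plus the new item would extend
+ * past context->wrapColumn.)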
+ */ + if (strlen(trailing_nl) + itembuf.len > context->wrapColumn) + appendContextKeyword(context, "", -PRETTYINDENT_STD, + PRETTYINDENT_STD, + PRETTYINDENT_VAR); + } + } + + /* Add the new item */ + appendStringInfoString(buf, itembuf.data); + + /* clean up */ + pfree(itembuf.data); + } + } +} + +static void +get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) +{ + StringInfo buf = context->buf; + deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); + + if (IsA(jtnode, RangeTblRef)) + { + int varno = ((RangeTblRef *) jtnode)->rtindex; + RangeTblEntry *rte = rt_fetch(varno, query->rtable); + deparse_columns *colinfo = deparse_columns_fetch(varno, dpns); + RangeTblFunction *rtfunc1 = NULL; + CitusRTEKind rteKind = GetRangeTblKind(rte); + + if (rte->lateral) + appendStringInfoString(buf, "LATERAL "); + + /* Print the FROM item proper */ + switch (rte->rtekind) + { + case RTE_RELATION: + /* Normal relation RTE */ + appendStringInfo(buf, "%s%s", + only_marker(rte), + generate_relation_or_shard_name(rte->relid, + context->distrelid, + context->shardid, + context->namespaces)); + break; + case RTE_SUBQUERY: + /* Subquery RTE */ + appendStringInfoChar(buf, '('); + get_query_def(rte->subquery, buf, context->namespaces, NULL, + true, + context->prettyFlags, context->wrapColumn, + context->indentLevel); + appendStringInfoChar(buf, ')'); + break; + case RTE_FUNCTION: + /* if it's a shard, do differently */ + if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) + { + char *fragmentSchemaName = NULL; + char *fragmentTableName = NULL; + + ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); + + /* use schema and table name from the remote alias */ + appendStringInfo(buf, "%s%s", + only_marker(rte), + generate_fragment_name(fragmentSchemaName, + fragmentTableName)); + break; + } + + /* Function RTE */ + rtfunc1 = (RangeTblFunction *) linitial(rte->functions); + + /* + * Omit ROWS FROM() syntax for just one function, unless it + * has both a coldeflist and WITH ORDINALITY. If it has both, + * we must use ROWS FROM() syntax to avoid ambiguity about + * whether the coldeflist includes the ordinality column. + */ + if (list_length(rte->functions) == 1 && + (rtfunc1->funccolnames == NIL || !rte->funcordinality)) + { + get_rule_expr_funccall(rtfunc1->funcexpr, context, true); + /* we'll print the coldeflist below, if it has one */ + } + else + { + bool all_unnest; + ListCell *lc; + + /* + * If all the function calls in the list are to unnest, + * and none need a coldeflist, then collapse the list back + * down to UNNEST(args). (If we had more than one + * built-in unnest function, this would get more + * difficult.) + * + * XXX This is pretty ugly, since it makes not-terribly- + * future-proof assumptions about what the parser would do + * with the output; but the alternative is to emit our + * nonstandard ROWS FROM() notation for what might have + * been a perfectly spec-compliant multi-argument + * UNNEST(). 
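+ * For example, a query written as UNNEST(a, b) is parsed into two
+ * unnest() calls, and we print it back as UNNEST(a, b) rather than
+ * ROWS FROM(unnest(a), unnest(b)).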
+ */ + all_unnest = true; + foreach(lc, rte->functions) + { + RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); + + if (!IsA(rtfunc->funcexpr, FuncExpr) || + ((FuncExpr *) rtfunc->funcexpr)->funcid != F_UNNEST_ANYARRAY || + rtfunc->funccolnames != NIL) + { + all_unnest = false; + break; + } + } + + if (all_unnest) + { + List *allargs = NIL; + + foreach(lc, rte->functions) + { + RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); + List *args = ((FuncExpr *) rtfunc->funcexpr)->args; + + allargs = list_concat(allargs, args); + } + + appendStringInfoString(buf, "UNNEST("); + get_rule_expr((Node *) allargs, context, true); + appendStringInfoChar(buf, ')'); + } + else + { + int funcno = 0; + + appendStringInfoString(buf, "ROWS FROM("); + foreach(lc, rte->functions) + { + RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); + + if (funcno > 0) + appendStringInfoString(buf, ", "); + get_rule_expr_funccall(rtfunc->funcexpr, context, true); + if (rtfunc->funccolnames != NIL) + { + /* Reconstruct the column definition list */ + appendStringInfoString(buf, " AS "); + get_from_clause_coldeflist(rtfunc, + NULL, + context); + } + funcno++; + } + appendStringInfoChar(buf, ')'); + } + /* prevent printing duplicate coldeflist below */ + rtfunc1 = NULL; + } + if (rte->funcordinality) + appendStringInfoString(buf, " WITH ORDINALITY"); + break; + case RTE_TABLEFUNC: + get_tablefunc(rte->tablefunc, context, true); + break; + case RTE_VALUES: + /* Values list RTE */ + appendStringInfoChar(buf, '('); + get_values_def(rte->values_lists, context); + appendStringInfoChar(buf, ')'); + break; + case RTE_CTE: + appendStringInfoString(buf, quote_identifier(rte->ctename)); + break; + default: + elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind); + break; + } + + /* Print the relation alias, if needed */ + get_rte_alias(rte, varno, false, context); + + /* Print the column definitions or aliases, if needed */ + if (rtfunc1 && rtfunc1->funccolnames != NIL) + { + /* Reconstruct the columndef list, which is also the aliases */ + get_from_clause_coldeflist(rtfunc1, colinfo, context); + } + else if (GetRangeTblKind(rte) != CITUS_RTE_SHARD || + (rte->alias != NULL && rte->alias->colnames != NIL)) + { + /* Else print column aliases as needed */ + get_column_alias_list(colinfo, context); + } + /* check if column's are given aliases in distributed tables */ + else if (colinfo->parentUsing != NIL) + { + Assert(colinfo->printaliases); + get_column_alias_list(colinfo, context); + } + + /* Tablesample clause must go after any alias */ + if ((rteKind == CITUS_RTE_RELATION || rteKind == CITUS_RTE_SHARD) && + rte->tablesample) + { + get_tablesample_def(rte->tablesample, context); + } + } + else if (IsA(jtnode, JoinExpr)) + { + JoinExpr *j = (JoinExpr *) jtnode; + deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); + bool need_paren_on_right; + + need_paren_on_right = PRETTY_PAREN(context) && + !IsA(j->rarg, RangeTblRef) && + !(IsA(j->rarg, JoinExpr) && ((JoinExpr *) j->rarg)->alias != NULL); + + if (!PRETTY_PAREN(context) || j->alias != NULL) + appendStringInfoChar(buf, '('); + + get_from_clause_item(j->larg, query, context); + + switch (j->jointype) + { + case JOIN_INNER: + if (j->quals) + appendContextKeyword(context, " JOIN ", + -PRETTYINDENT_STD, + PRETTYINDENT_STD, + PRETTYINDENT_JOIN); + else + appendContextKeyword(context, " CROSS JOIN ", + -PRETTYINDENT_STD, + PRETTYINDENT_STD, + PRETTYINDENT_JOIN); + break; + case JOIN_LEFT: + appendContextKeyword(context, " LEFT JOIN ", + 
-PRETTYINDENT_STD, + PRETTYINDENT_STD, + PRETTYINDENT_JOIN); + break; + case JOIN_FULL: + appendContextKeyword(context, " FULL JOIN ", + -PRETTYINDENT_STD, + PRETTYINDENT_STD, + PRETTYINDENT_JOIN); + break; + case JOIN_RIGHT: + appendContextKeyword(context, " RIGHT JOIN ", + -PRETTYINDENT_STD, + PRETTYINDENT_STD, + PRETTYINDENT_JOIN); + break; + default: + elog(ERROR, "unrecognized join type: %d", + (int) j->jointype); + } + + if (need_paren_on_right) + appendStringInfoChar(buf, '('); + get_from_clause_item(j->rarg, query, context); + if (need_paren_on_right) + appendStringInfoChar(buf, ')'); + + if (j->usingClause) + { + ListCell *lc; + bool first = true; + + appendStringInfoString(buf, " USING ("); + /* Use the assigned names, not what's in usingClause */ + foreach(lc, colinfo->usingNames) + { + char *colname = (char *) lfirst(lc); + + if (first) + first = false; + else + appendStringInfoString(buf, ", "); + appendStringInfoString(buf, quote_identifier(colname)); + } + appendStringInfoChar(buf, ')'); + + if (j->join_using_alias) + appendStringInfo(buf, " AS %s", + quote_identifier(j->join_using_alias->aliasname)); + } + else if (j->quals) + { + appendStringInfoString(buf, " ON "); + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, '('); + get_rule_expr(j->quals, context, false); + if (!PRETTY_PAREN(context)) + appendStringInfoChar(buf, ')'); + } + else if (j->jointype != JOIN_INNER) + { + /* If we didn't say CROSS JOIN above, we must provide an ON */ + appendStringInfoString(buf, " ON TRUE"); + } + + if (!PRETTY_PAREN(context) || j->alias != NULL) + appendStringInfoChar(buf, ')'); + + /* Yes, it's correct to put alias after the right paren ... */ + if (j->alias != NULL) + { + /* + * Note that it's correct to emit an alias clause if and only if + * there was one originally. Otherwise we'd be converting a named + * join to unnamed or vice versa, which creates semantic + * subtleties we don't want. However, we might print a different + * alias name than was there originally. + */ + appendStringInfo(buf, " %s", + quote_identifier(get_rtable_name(j->rtindex, + context))); + get_column_alias_list(colinfo, context); + } + } + else + elog(ERROR, "unrecognized node type: %d", + (int) nodeTag(jtnode)); +} + +/* + * get_rte_alias - print the relation's alias, if needed + * + * If printed, the alias is preceded by a space, or by " AS " if use_as is true. + */ +static void +get_rte_alias(RangeTblEntry *rte, int varno, bool use_as, + deparse_context *context) +{ + deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); + char *refname = get_rtable_name(varno, context); + deparse_columns *colinfo = deparse_columns_fetch(varno, dpns); + bool printalias = false; + + if (rte->alias != NULL) + { + /* Always print alias if user provided one */ + printalias = true; + } + else if (colinfo->printaliases) + { + /* Always print alias if we need to print column aliases */ + printalias = true; + } + else if (rte->rtekind == RTE_RELATION) + { + /* + * No need to print alias if it's same as relation name (this would + * normally be the case, but not if set_rtable_names had to resolve a + * conflict). + */ + if (strcmp(refname, get_relation_name(rte->relid)) != 0) + printalias = true; + } + else if (rte->rtekind == RTE_FUNCTION) + { + /* + * For a function RTE, always print alias. This covers possible + * renaming of the function and/or instability of the FigureColname + * rules for things that aren't simple functions. Note we'd need to + * force it anyway for the columndef list case. 
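+ * For example, generate_series(1, 10) is always printed with an alias,
+ * even when that alias is simply generate_series.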
+ */ + printalias = true; + } + else if (rte->rtekind == RTE_SUBQUERY || + rte->rtekind == RTE_VALUES) + { + /* + * For a subquery, always print alias. This makes the output + * SQL-spec-compliant, even though we allow such aliases to be omitted + * on input. + */ + printalias = true; + } + else if (rte->rtekind == RTE_CTE) + { + /* + * No need to print alias if it's same as CTE name (this would + * normally be the case, but not if set_rtable_names had to resolve a + * conflict). + */ + if (strcmp(refname, rte->ctename) != 0) + printalias = true; + } + + if (printalias) + appendStringInfo(context->buf, "%s%s", + use_as ? " AS " : " ", + quote_identifier(refname)); +} + +/* + * get_column_alias_list - print column alias list for an RTE + * + * Caller must already have printed the relation's alias name. + */ +static void +get_column_alias_list(deparse_columns *colinfo, deparse_context *context) +{ + StringInfo buf = context->buf; + int i; + bool first = true; + + /* Don't print aliases if not needed */ + if (!colinfo->printaliases) + return; + + for (i = 0; i < colinfo->num_new_cols; i++) + { + char *colname = colinfo->new_colnames[i]; + + if (first) + { + appendStringInfoChar(buf, '('); + first = false; + } + else + appendStringInfoString(buf, ", "); + appendStringInfoString(buf, quote_identifier(colname)); + } + if (!first) + appendStringInfoChar(buf, ')'); +} + +/* + * get_from_clause_coldeflist - reproduce FROM clause coldeflist + * + * When printing a top-level coldeflist (which is syntactically also the + * relation's column alias list), use column names from colinfo. But when + * printing a coldeflist embedded inside ROWS FROM(), we prefer to use the + * original coldeflist's names, which are available in rtfunc->funccolnames. + * Pass NULL for colinfo to select the latter behavior. + * + * The coldeflist is appended immediately (no space) to buf. Caller is + * responsible for ensuring that an alias or AS is present before it. + */ +static void +get_from_clause_coldeflist(RangeTblFunction *rtfunc, + deparse_columns *colinfo, + deparse_context *context) +{ + StringInfo buf = context->buf; + ListCell *l1; + ListCell *l2; + ListCell *l3; + ListCell *l4; + int i; + + appendStringInfoChar(buf, '('); + + i = 0; + forfour(l1, rtfunc->funccoltypes, + l2, rtfunc->funccoltypmods, + l3, rtfunc->funccolcollations, + l4, rtfunc->funccolnames) + { + Oid atttypid = lfirst_oid(l1); + int32 atttypmod = lfirst_int(l2); + Oid attcollation = lfirst_oid(l3); + char *attname; + + if (colinfo) + attname = colinfo->colnames[i]; + else + attname = strVal(lfirst(l4)); + + Assert(attname); /* shouldn't be any dropped columns here */ + + if (i > 0) + appendStringInfoString(buf, ", "); + appendStringInfo(buf, "%s %s", + quote_identifier(attname), + format_type_with_typemod(atttypid, atttypmod)); + if (OidIsValid(attcollation) && + attcollation != get_typcollation(atttypid)) + appendStringInfo(buf, " COLLATE %s", + generate_collation_name(attcollation)); + + i++; + } + + appendStringInfoChar(buf, ')'); +} + +/* + * get_tablesample_def - print a TableSampleClause + */ +static void +get_tablesample_def(TableSampleClause *tablesample, deparse_context *context) +{ + StringInfo buf = context->buf; + Oid argtypes[1]; + int nargs; + ListCell *l; + + /* + * We should qualify the handler's function name if it wouldn't be + * resolved by lookup in the current search path. 
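+ * Tablesample method handlers are declared with a single INTERNAL
+ * argument, which is the signature used for the lookup below.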
+ */ + argtypes[0] = INTERNALOID; + appendStringInfo(buf, " TABLESAMPLE %s (", + generate_function_name(tablesample->tsmhandler, 1, + NIL, argtypes, + false, NULL, EXPR_KIND_NONE)); + + nargs = 0; + foreach(l, tablesample->args) + { + if (nargs++ > 0) + appendStringInfoString(buf, ", "); + get_rule_expr((Node *) lfirst(l), context, false); + } + appendStringInfoChar(buf, ')'); + + if (tablesample->repeatable != NULL) + { + appendStringInfoString(buf, " REPEATABLE ("); + get_rule_expr((Node *) tablesample->repeatable, context, false); + appendStringInfoChar(buf, ')'); + } +} + +/* + * get_opclass_name - fetch name of an index operator class + * + * The opclass name is appended (after a space) to buf. + * + * Output is suppressed if the opclass is the default for the given + * actual_datatype. (If you don't want this behavior, just pass + * InvalidOid for actual_datatype.) + */ +static void +get_opclass_name(Oid opclass, Oid actual_datatype, + StringInfo buf) +{ + HeapTuple ht_opc; + Form_pg_opclass opcrec; + char *opcname; + char *nspname; + + ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); + if (!HeapTupleIsValid(ht_opc)) + elog(ERROR, "cache lookup failed for opclass %u", opclass); + opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc); + + if (!OidIsValid(actual_datatype) || + GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) + { + /* Okay, we need the opclass name. Do we need to qualify it? */ + opcname = NameStr(opcrec->opcname); + if (OpclassIsVisible(opclass)) + appendStringInfo(buf, " %s", quote_identifier(opcname)); + else + { + nspname = get_namespace_name_or_temp(opcrec->opcnamespace); + appendStringInfo(buf, " %s.%s", + quote_identifier(nspname), + quote_identifier(opcname)); + } + } + ReleaseSysCache(ht_opc); +} + +/* + * processIndirection - take care of array and subfield assignment + * + * We strip any top-level FieldStore or assignment SubscriptingRef nodes that + * appear in the input, printing them as decoration for the base column + * name (which we assume the caller just printed). We might also need to + * strip CoerceToDomain nodes, but only ones that appear above assignment + * nodes. + * + * Returns the subexpression that's to be assigned. + */ +static Node * +processIndirection(Node *node, deparse_context *context) +{ + StringInfo buf = context->buf; + CoerceToDomain *cdomain = NULL; + + for (;;) + { + if (node == NULL) + break; + if (IsA(node, FieldStore)) + { + FieldStore *fstore = (FieldStore *) node; + Oid typrelid; + char *fieldname; + + /* lookup tuple type */ + typrelid = get_typ_typrelid(fstore->resulttype); + if (!OidIsValid(typrelid)) + elog(ERROR, "argument type %s of FieldStore is not a tuple type", + format_type_be(fstore->resulttype)); + + /* + * Print the field name. There should only be one target field in + * stored rules. There could be more than that in executable + * target lists, but this function cannot be used for that case. + */ + Assert(list_length(fstore->fieldnums) == 1); + fieldname = get_attname(typrelid, + linitial_int(fstore->fieldnums), false); + appendStringInfo(buf, ".%s", quote_identifier(fieldname)); + + /* + * We ignore arg since it should be an uninteresting reference to + * the target column or subcolumn. 
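+ * For example, in UPDATE tab SET col.subfield = ..., we have just
+ * printed '.subfield' and now descend to the expression being assigned.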
+ */ + node = (Node *) linitial(fstore->newvals); + } + else if (IsA(node, SubscriptingRef)) + { + SubscriptingRef *sbsref = (SubscriptingRef *) node; + + if (sbsref->refassgnexpr == NULL) + break; + printSubscripts(sbsref, context); + + /* + * We ignore refexpr since it should be an uninteresting reference + * to the target column or subcolumn. + */ + node = (Node *) sbsref->refassgnexpr; + } + else if (IsA(node, CoerceToDomain)) + { + cdomain = (CoerceToDomain *) node; + /* If it's an explicit domain coercion, we're done */ + if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) + break; + /* Tentatively descend past the CoerceToDomain */ + node = (Node *) cdomain->arg; + } + else + break; + } + + /* + * If we descended past a CoerceToDomain whose argument turned out not to + * be a FieldStore or array assignment, back up to the CoerceToDomain. + * (This is not enough to be fully correct if there are nested implicit + * CoerceToDomains, but such cases shouldn't ever occur.) + */ + if (cdomain && node == (Node *) cdomain->arg) + node = (Node *) cdomain; + + return node; +} + +static void +printSubscripts(SubscriptingRef *sbsref, deparse_context *context) +{ + StringInfo buf = context->buf; + ListCell *lowlist_item; + ListCell *uplist_item; + + lowlist_item = list_head(sbsref->reflowerindexpr); /* could be NULL */ + foreach(uplist_item, sbsref->refupperindexpr) + { + appendStringInfoChar(buf, '['); + if (lowlist_item) + { + /* If subexpression is NULL, get_rule_expr prints nothing */ + get_rule_expr((Node *) lfirst(lowlist_item), context, false); + appendStringInfoChar(buf, ':'); + lowlist_item = lnext(sbsref->reflowerindexpr, lowlist_item); + } + /* If subexpression is NULL, get_rule_expr prints nothing */ + get_rule_expr((Node *) lfirst(uplist_item), context, false); + appendStringInfoChar(buf, ']'); + } +} + +/* + * get_relation_name + * Get the unqualified name of a relation specified by OID + * + * This differs from the underlying get_rel_name() function in that it will + * throw error instead of silently returning NULL if the OID is bad. + */ +static char * +get_relation_name(Oid relid) +{ + char *relname = get_rel_name(relid); + + if (!relname) + elog(ERROR, "cache lookup failed for relation %u", relid); + return relname; +} + +/* + * generate_relation_or_shard_name + * Compute the name to display for a relation or shard + * + * If the provided relid is equal to the provided distrelid, this function + * returns a shard-extended relation name; otherwise, it falls through to a + * simple generate_relation_name call. + */ +static char * +generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid, + List *namespaces) +{ + char *relname = NULL; + + if (relid == distrelid) + { + relname = get_relation_name(relid); + + if (shardid > 0) + { + Oid schemaOid = get_rel_namespace(relid); + char *schemaName = get_namespace_name_or_temp(schemaOid); + + AppendShardIdToName(&relname, shardid); + + relname = quote_qualified_identifier(schemaName, relname); + } + } + else + { + relname = generate_relation_name(relid, namespaces); + } + + return relname; +} + +/* + * generate_relation_name + * Compute the name to display for a relation specified by OID + * + * The result includes all necessary quoting and schema-prefixing. + * + * If namespaces isn't NIL, it must be a list of deparse_namespace nodes. + * We will forcibly qualify the relation name if it equals any CTE name + * visible in the namespace list. 
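+ * Otherwise the unqualified name could be captured by the CTE when the
+ * deparsed query is parsed again.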
+ */ +char * +generate_relation_name(Oid relid, List *namespaces) +{ + HeapTuple tp; + Form_pg_class reltup; + bool need_qual; + ListCell *nslist; + char *relname; + char *nspname; + char *result; + + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(tp)) + elog(ERROR, "cache lookup failed for relation %u", relid); + reltup = (Form_pg_class) GETSTRUCT(tp); + relname = NameStr(reltup->relname); + + /* Check for conflicting CTE name */ + need_qual = false; + foreach(nslist, namespaces) + { + deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist); + ListCell *ctlist; + + foreach(ctlist, dpns->ctes) + { + CommonTableExpr *cte = (CommonTableExpr *) lfirst(ctlist); + + if (strcmp(cte->ctename, relname) == 0) + { + need_qual = true; + break; + } + } + if (need_qual) + break; + } + + /* Otherwise, qualify the name if not visible in search path */ + if (!need_qual) + need_qual = !RelationIsVisible(relid); + + if (need_qual) + nspname = get_namespace_name_or_temp(reltup->relnamespace); + else + nspname = NULL; + + result = quote_qualified_identifier(nspname, relname); + + ReleaseSysCache(tp); + + return result; +} + +/* + * generate_rte_shard_name returns the qualified name of the shard given a + * CITUS_RTE_SHARD range table entry. + */ +static char * +generate_rte_shard_name(RangeTblEntry *rangeTableEntry) +{ + char *shardSchemaName = NULL; + char *shardTableName = NULL; + + Assert(GetRangeTblKind(rangeTableEntry) == CITUS_RTE_SHARD); + + ExtractRangeTblExtraData(rangeTableEntry, NULL, &shardSchemaName, &shardTableName, + NULL); + + return generate_fragment_name(shardSchemaName, shardTableName); +} + +/* + * generate_fragment_name + * Compute the name to display for a shard or merged table + * + * The result includes all necessary quoting and schema-prefixing. The schema + * name can be NULL for regular shards. For merged tables, they are always + * declared within a job-specific schema, and therefore can't have null schema + * names. + */ +static char * +generate_fragment_name(char *schemaName, char *tableName) +{ + StringInfo fragmentNameString = makeStringInfo(); + + if (schemaName != NULL) + { + appendStringInfo(fragmentNameString, "%s.%s", quote_identifier(schemaName), + quote_identifier(tableName)); + } + else + { + appendStringInfoString(fragmentNameString, quote_identifier(tableName)); + } + + return fragmentNameString->data; +} + +/* + * generate_function_name + * Compute the name to display for a function specified by OID, + * given that it is being called with the specified actual arg names and + * types. (Those matter because of ambiguous-function resolution rules.) + * + * If we're dealing with a potentially variadic function (in practice, this + * means a FuncExpr or Aggref, not some other way of calling a function), then + * has_variadic must specify whether variadic arguments have been merged, + * and *use_variadic_p will be set to indicate whether to print VARIADIC in + * the output. For non-FuncExpr cases, has_variadic should be false and + * use_variadic_p can be NULL. + * + * The result includes all necessary quoting and schema-prefixing. 
+ */ +static char * +generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, + bool has_variadic, bool *use_variadic_p, + ParseExprKind special_exprkind) +{ + char *result; + HeapTuple proctup; + Form_pg_proc procform; + char *proname; + bool use_variadic; + char *nspname; + FuncDetailCode p_result; + Oid p_funcid; + Oid p_rettype; + bool p_retset; + int p_nvargs; + Oid p_vatype; + Oid *p_true_typeids; + bool force_qualify = false; + + proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); + if (!HeapTupleIsValid(proctup)) + elog(ERROR, "cache lookup failed for function %u", funcid); + procform = (Form_pg_proc) GETSTRUCT(proctup); + proname = NameStr(procform->proname); + + /* + * Due to parser hacks to avoid needing to reserve CUBE, we need to force + * qualification in some special cases. + */ + if (special_exprkind == EXPR_KIND_GROUP_BY) + { + if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0) + force_qualify = true; + } + + /* + * Determine whether VARIADIC should be printed. We must do this first + * since it affects the lookup rules in func_get_detail(). + * + * Currently, we always print VARIADIC if the function has a merged + * variadic-array argument. Note that this is always the case for + * functions taking a VARIADIC argument type other than VARIADIC ANY. + * + * In principle, if VARIADIC wasn't originally specified and the array + * actual argument is deconstructable, we could print the array elements + * separately and not print VARIADIC, thus more nearly reproducing the + * original input. For the moment that seems like too much complication + * for the benefit, and anyway we do not know whether VARIADIC was + * originally specified if it's a non-ANY type. + */ + if (use_variadic_p) + { + /* Parser should not have set funcvariadic unless fn is variadic */ + Assert(!has_variadic || OidIsValid(procform->provariadic)); + use_variadic = has_variadic; + *use_variadic_p = use_variadic; + } + else + { + Assert(!has_variadic); + use_variadic = false; + } + + /* + * The idea here is to schema-qualify only if the parser would fail to + * resolve the correct function given the unqualified func name with the + * specified argtypes and VARIADIC flag. But if we already decided to + * force qualification, then we can skip the lookup and pretend we didn't + * find it. + */ + if (!force_qualify) + p_result = func_get_detail(list_make1(makeString(proname)), + NIL, argnames, nargs, argtypes, + !use_variadic, true, false, + &p_funcid, &p_rettype, + &p_retset, &p_nvargs, &p_vatype, + &p_true_typeids, NULL); + else + { + p_result = FUNCDETAIL_NOTFOUND; + p_funcid = InvalidOid; + } + + if ((p_result == FUNCDETAIL_NORMAL || + p_result == FUNCDETAIL_AGGREGATE || + p_result == FUNCDETAIL_WINDOWFUNC) && + p_funcid == funcid) + nspname = NULL; + else + nspname = get_namespace_name_or_temp(procform->pronamespace); + + result = quote_qualified_identifier(nspname, proname); + + ReleaseSysCache(proctup); + + return result; +} + +/* + * generate_operator_name + * Compute the name to display for an operator specified by OID, + * given that it is being called with the specified actual arg types. + * (Arg types matter because of ambiguous-operator resolution rules. + * Pass InvalidOid for unused arg of a unary operator.) + * + * The result includes all necessary quoting and schema-prefixing, + * plus the OPERATOR() decoration needed to use a qualified operator name + * in an expression. 
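+ * For example, the built-in equality operator is emitted as
+ * OPERATOR(pg_catalog.=).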
+ */ +char * +generate_operator_name(Oid operid, Oid arg1, Oid arg2) +{ + StringInfoData buf; + HeapTuple opertup; + Form_pg_operator operform; + char *oprname; + char *nspname; + + initStringInfo(&buf); + + opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operid)); + if (!HeapTupleIsValid(opertup)) + elog(ERROR, "cache lookup failed for operator %u", operid); + operform = (Form_pg_operator) GETSTRUCT(opertup); + oprname = NameStr(operform->oprname); + + /* + * Unlike generate_operator_name() in postgres/src/backend/utils/adt/ruleutils.c, + * we don't check if the operator is in current namespace or not. This is + * because this check is costly when the operator is not in current namespace. + */ + nspname = get_namespace_name_or_temp(operform->oprnamespace); + Assert(nspname != NULL); + appendStringInfo(&buf, "OPERATOR(%s.", quote_identifier(nspname)); + appendStringInfoString(&buf, oprname); + appendStringInfoChar(&buf, ')'); + + ReleaseSysCache(opertup); + + return buf.data; +} + +/* + * get_one_range_partition_bound_string + * A C string representation of one range partition bound + */ +char * +get_range_partbound_string(List *bound_datums) +{ + deparse_context context; + StringInfo buf = makeStringInfo(); + ListCell *cell; + char *sep; + + memset(&context, 0, sizeof(deparse_context)); + context.buf = buf; + + appendStringInfoChar(buf, '('); + sep = ""; + foreach(cell, bound_datums) + { + PartitionRangeDatum *datum = + lfirst_node(PartitionRangeDatum, cell); + + appendStringInfoString(buf, sep); + if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE) + appendStringInfoString(buf, "MINVALUE"); + else if (datum->kind == PARTITION_RANGE_DATUM_MAXVALUE) + appendStringInfoString(buf, "MAXVALUE"); + else + { + Const *val = castNode(Const, datum->value); + + get_const_expr(val, &context, -1); + } + sep = ", "; + } + appendStringInfoChar(buf, ')'); + + return buf->data; +} + +/* + * get_list_partvalue_string + * A C string representation of one list partition value + */ +char * +get_list_partvalue_string(Const *val) +{ + deparse_context context; + StringInfo buf = makeStringInfo(); + + memset(&context, 0, sizeof(deparse_context)); + context.buf = buf; + + get_const_expr(val, &context, -1); + + return buf->data; +} + +/* + * Collect a list of OIDs of all sequences owned by the specified relation, + * and column if specified. If deptype is not zero, then only find sequences + * with the specified dependency type. + */ +List * +getOwnedSequences_internal(Oid relid, AttrNumber attnum, char deptype) +{ + List *result = NIL; + Relation depRel; + ScanKeyData key[3]; + SysScanDesc scan; + HeapTuple tup; + + depRel = table_open(DependRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_depend_refclassid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], + Anum_pg_depend_refobjid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(relid)); + if (attnum) + ScanKeyInit(&key[2], + Anum_pg_depend_refobjsubid, + BTEqualStrategyNumber, F_INT4EQ, + Int32GetDatum(attnum)); + + scan = systable_beginscan(depRel, DependReferenceIndexId, true, + NULL, attnum ? 3 : 2, key); + + while (HeapTupleIsValid(tup = systable_getnext(scan))) + { + Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup); + + /* + * We assume any auto or internal dependency of a sequence on a column + * must be what we are looking for. (We need the relkind test because + * indexes can also have auto dependencies on columns.) 
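+ * In practice these are the dependencies recorded for serial (auto) and
+ * identity (internal) columns.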
+ */ + if (deprec->classid == RelationRelationId && + deprec->objsubid == 0 && + deprec->refobjsubid != 0 && + (deprec->deptype == DEPENDENCY_AUTO || deprec->deptype == DEPENDENCY_INTERNAL) && + get_rel_relkind(deprec->objid) == RELKIND_SEQUENCE) + { + if (!deptype || deprec->deptype == deptype) + result = lappend_oid(result, deprec->objid); + } + } + + systable_endscan(scan); + + table_close(depRel, AccessShareLock); + + return result; +} + +/* + * get_insert_column_names_list Prepares the insert-column-names list. Any indirection + * decoration needed on the column names can be inferred from the top targetlist. + */ +static List * +get_insert_column_names_list(List *targetList, StringInfo buf, + deparse_context *context, RangeTblEntry *rte) +{ + char *sep; + ListCell *l; + List *strippedexprs; + + strippedexprs = NIL; + sep = ""; + appendStringInfoChar(buf, '('); + foreach(l, targetList) + { + TargetEntry *tle = (TargetEntry *) lfirst(l); + + if (tle->resjunk) + continue; /* ignore junk entries */ + + appendStringInfoString(buf, sep); + sep = ", "; + + /* + * Put out name of target column; look in the catalogs, not at + * tle->resname, since resname will fail to track RENAME. + */ + appendStringInfoString(buf, + quote_identifier(get_attname(rte->relid, + tle->resno, + false))); + + /* + * Print any indirection needed (subfields or subscripts), and strip + * off the top-level nodes representing the indirection assignments. + * Add the stripped expressions to strippedexprs. (If it's a + * single-VALUES statement, the stripped expressions are the VALUES to + * print below. Otherwise they're just Vars and not really + * interesting.) + */ + strippedexprs = lappend(strippedexprs, + processIndirection((Node *) tle->expr, + context)); + } + appendStringInfoString(buf, ") "); + + return strippedexprs; +} +#endif /* (PG_VERSION_NUM >= PG_VERSION_17) && (PG_VERSION_NUM < PG_VERSION_18) */ diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c index e912f418d6f..9f98ad9cf14 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -1430,7 +1430,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution) List *taskList = execution->remoteTaskList; Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { bool placementExecutionReady = true; int placementExecutionIndex = 0; @@ -1453,7 +1453,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution) SetAttributeInputMetadata(execution, shardCommandExecution); ShardPlacement *taskPlacement = NULL; - foreach_ptr(taskPlacement, task->taskPlacementList) + foreach_declared_ptr(taskPlacement, task->taskPlacementList) { int connectionFlags = 0; char *nodeName = NULL; @@ -1598,7 +1598,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution) * connection may be be returned multiple times by GetPlacementListConnectionIfCached. 
*/ WorkerSession *session = NULL; - foreach_ptr(session, execution->sessionList) + foreach_declared_ptr(session, execution->sessionList) { MultiConnection *connection = session->connection; @@ -1721,7 +1721,7 @@ static WorkerPool * FindOrCreateWorkerPool(DistributedExecution *execution, char *nodeName, int nodePort) { WorkerPool *workerPool = NULL; - foreach_ptr(workerPool, execution->workerList) + foreach_declared_ptr(workerPool, execution->workerList) { if (strncmp(nodeName, workerPool->nodeName, WORKER_LENGTH) == 0 && nodePort == workerPool->nodePort) @@ -1768,7 +1768,7 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection) static uint64 sessionId = 1; WorkerSession *session = NULL; - foreach_ptr(session, workerPool->sessionList) + foreach_declared_ptr(session, workerPool->sessionList) { if (session->connection == connection) { @@ -1871,7 +1871,7 @@ SequentialRunDistributedExecution(DistributedExecution *execution) */ MultiShardConnectionType = SEQUENTIAL_CONNECTION; Task *taskToExecute = NULL; - foreach_ptr(taskToExecute, taskList) + foreach_declared_ptr(taskToExecute, taskList) { execution->remoteAndLocalTaskList = list_make1(taskToExecute); execution->remoteTaskList = list_make1(taskToExecute); @@ -1911,7 +1911,7 @@ RunDistributedExecution(DistributedExecution *execution) { /* Preemptively step state machines in case of immediate errors */ WorkerSession *session = NULL; - foreach_ptr(session, execution->sessionList) + foreach_declared_ptr(session, execution->sessionList) { ConnectionStateMachine(session); } @@ -1943,7 +1943,7 @@ RunDistributedExecution(DistributedExecution *execution) HasIncompleteConnectionEstablishment(execution))) { WorkerPool *workerPool = NULL; - foreach_ptr(workerPool, execution->workerList) + foreach_declared_ptr(workerPool, execution->workerList) { ManageWorkerPool(workerPool); } @@ -2028,7 +2028,7 @@ ProcessSessionsWithFailedWaitEventSetOperations(DistributedExecution *execution) { bool foundFailedSession = false; WorkerSession *session = NULL; - foreach_ptr(session, execution->sessionList) + foreach_declared_ptr(session, execution->sessionList) { if (session->waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED) { @@ -2072,7 +2072,7 @@ HasIncompleteConnectionEstablishment(DistributedExecution *execution) } WorkerSession *session = NULL; - foreach_ptr(session, execution->sessionList) + foreach_declared_ptr(session, execution->sessionList) { MultiConnection *connection = session->connection; if (connection->connectionState == MULTI_CONNECTION_INITIAL || @@ -2550,7 +2550,7 @@ AvgTaskExecutionTimeApproximation(WorkerPool *workerPool) INSTR_TIME_SET_CURRENT(now); WorkerSession *session = NULL; - foreach_ptr(session, workerPool->sessionList) + foreach_declared_ptr(session, workerPool->sessionList) { /* * Involve the tasks that are currently running. 
We do this to @@ -2588,7 +2588,7 @@ AvgConnectionEstablishmentTime(WorkerPool *workerPool) int sessionCount = 0; WorkerSession *session = NULL; - foreach_ptr(session, workerPool->sessionList) + foreach_declared_ptr(session, workerPool->sessionList) { MultiConnection *connection = session->connection; @@ -2744,7 +2744,7 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount, #endif WorkerSession *session = NULL; - foreach_ptr(session, newSessionsList) + foreach_declared_ptr(session, newSessionsList) { /* immediately run the state machine to handle potential failure */ ConnectionStateMachine(session); @@ -2862,7 +2862,7 @@ static void MarkEstablishingSessionsTimedOut(WorkerPool *workerPool) { WorkerSession *session = NULL; - foreach_ptr(session, workerPool->sessionList) + foreach_declared_ptr(session, workerPool->sessionList) { MultiConnection *connection = session->connection; @@ -2914,7 +2914,7 @@ NextEventTimeout(DistributedExecution *execution) long eventTimeout = 1000; /* milliseconds */ WorkerPool *workerPool = NULL; - foreach_ptr(workerPool, execution->workerList) + foreach_declared_ptr(workerPool, execution->workerList) { if (workerPool->failureState == WORKER_POOL_FAILED) { @@ -4255,7 +4255,7 @@ WorkerPoolFailed(WorkerPool *workerPool) } WorkerSession *session = NULL; - foreach_ptr(session, workerPool->sessionList) + foreach_declared_ptr(session, workerPool->sessionList) { WorkerSessionFailed(session); } @@ -4280,7 +4280,7 @@ WorkerPoolFailed(WorkerPool *workerPool) List *workerList = workerPool->distributedExecution->workerList; WorkerPool *pool = NULL; - foreach_ptr(pool, workerList) + foreach_declared_ptr(pool, workerList) { /* failed pools or pools without any connection attempts ignored */ if (pool->failureState == WORKER_POOL_FAILED || @@ -4633,7 +4633,7 @@ PlacementExecutionReady(TaskPlacementExecution *placementExecution) /* wake up an idle connection by checking whether the connection is writeable */ WorkerSession *session = NULL; - foreach_ptr(session, workerPool->sessionList) + foreach_declared_ptr(session, workerPool->sessionList) { MultiConnection *connection = session->connection; RemoteTransaction *transaction = &(connection->remoteTransaction); @@ -4755,10 +4755,10 @@ BuildWaitEventSet(List *sessionList) int eventSetSize = GetEventSetSize(sessionList); WaitEventSet *waitEventSet = - CreateWaitEventSet(CurrentMemoryContext, eventSetSize); + CreateWaitEventSet(WaitEventSetTracker_compat, eventSetSize); WorkerSession *session = NULL; - foreach_ptr(session, sessionList) + foreach_declared_ptr(session, sessionList) { AddSessionToWaitEventSet(session, waitEventSet); } @@ -4856,7 +4856,7 @@ static void RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList) { WorkerSession *session = NULL; - foreach_ptr(session, sessionList) + foreach_declared_ptr(session, sessionList) { MultiConnection *connection = session->connection; int waitEventSetIndex = session->waitEventSetIndex; @@ -4912,7 +4912,7 @@ CleanUpSessions(DistributedExecution *execution) /* always trigger wait event set in the first round */ WorkerSession *session = NULL; - foreach_ptr(session, sessionList) + foreach_declared_ptr(session, sessionList) { MultiConnection *connection = session->connection; @@ -4993,7 +4993,7 @@ static void UnclaimAllSessionConnections(List *sessionList) { WorkerSession *session = NULL; - foreach_ptr(session, sessionList) + foreach_declared_ptr(session, sessionList) { MultiConnection *connection = session->connection; diff --git 
a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c index 34a2f3d90bd..e072ba0fa1a 100644 --- a/src/backend/distributed/executor/citus_custom_scan.c +++ b/src/backend/distributed/executor/citus_custom_scan.c @@ -524,7 +524,7 @@ static bool AnchorShardsInTaskListExist(List *taskList) { Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { if (!ShardExists(task->anchorShardId)) { diff --git a/src/backend/distributed/executor/directed_acyclic_graph_execution.c b/src/backend/distributed/executor/directed_acyclic_graph_execution.c index 15b0272ddaf..48e8fbb6458 100644 --- a/src/backend/distributed/executor/directed_acyclic_graph_execution.c +++ b/src/backend/distributed/executor/directed_acyclic_graph_execution.c @@ -94,7 +94,7 @@ FindExecutableTasks(List *allTasks, HTAB *completedTasks) List *curTasks = NIL; Task *task = NULL; - foreach_ptr(task, allTasks) + foreach_declared_ptr(task, allTasks) { if (IsAllDependencyCompleted(task, completedTasks) && !IsTaskAlreadyCompleted(task, completedTasks)) @@ -118,7 +118,7 @@ RemoveMergeTasks(List *taskList) List *prunedTaskList = NIL; Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { if (task->taskType != MERGE_TASK) { @@ -139,7 +139,7 @@ AddCompletedTasks(List *curCompletedTasks, HTAB *completedTasks) bool found; Task *task = NULL; - foreach_ptr(task, curCompletedTasks) + foreach_declared_ptr(task, curCompletedTasks) { TaskHashKey taskKey = { task->jobId, task->taskId }; hash_search(completedTasks, &taskKey, HASH_ENTER, &found); @@ -172,7 +172,7 @@ IsAllDependencyCompleted(Task *targetTask, HTAB *completedTasks) bool found = false; Task *task = NULL; - foreach_ptr(task, targetTask->dependentTaskList) + foreach_declared_ptr(task, targetTask->dependentTaskList) { TaskHashKey taskKey = { task->jobId, task->taskId }; diff --git a/src/backend/distributed/executor/distributed_execution_locks.c b/src/backend/distributed/executor/distributed_execution_locks.c index 4424accb712..9c9f09a4cab 100644 --- a/src/backend/distributed/executor/distributed_execution_locks.c +++ b/src/backend/distributed/executor/distributed_execution_locks.c @@ -198,7 +198,7 @@ AcquireExecutorShardLocksForExecution(RowModifyLevel modLevel, List *taskList) List *requiresConsistentSnapshotRelationShardList = NIL; Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { ShardInterval *anchorShardInterval = LoadShardInterval(task->anchorShardId); anchorShardIntervalList = lappend(anchorShardIntervalList, anchorShardInterval); @@ -344,7 +344,7 @@ AcquireMetadataLocks(List *taskList) */ Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { LockShardDistributionMetadata(task->anchorShardId, ShareLock); } @@ -379,7 +379,7 @@ AcquireExecutorShardLocksForRelationRowLockList(List *relationRowLockList) * them. 
*/ RelationRowLock *relationRowLock = NULL; - foreach_ptr(relationRowLock, relationRowLockList) + foreach_declared_ptr(relationRowLock, relationRowLockList) { LockClauseStrength rowLockStrength = relationRowLock->rowLockStrength; Oid relationId = relationRowLock->relationId; @@ -412,7 +412,7 @@ void LockPartitionsInRelationList(List *relationIdList, LOCKMODE lockmode) { Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { if (PartitionedTable(relationId)) { @@ -437,7 +437,7 @@ LockPartitionRelations(Oid relationId, LOCKMODE lockMode) */ List *partitionList = PartitionList(relationId); Oid partitionRelationId = InvalidOid; - foreach_oid(partitionRelationId, partitionList) + foreach_declared_oid(partitionRelationId, partitionList) { LockRelationOid(partitionRelationId, lockMode); } diff --git a/src/backend/distributed/executor/distributed_intermediate_results.c b/src/backend/distributed/executor/distributed_intermediate_results.c index c5ac27fb624..24e8ca8d806 100644 --- a/src/backend/distributed/executor/distributed_intermediate_results.c +++ b/src/backend/distributed/executor/distributed_intermediate_results.c @@ -206,7 +206,7 @@ WrapTasksForPartitioning(const char *resultIdPrefix, List *selectTaskList, intervalTypeMod); Task *selectTask = NULL; - foreach_ptr(selectTask, selectTaskList) + foreach_declared_ptr(selectTask, selectTaskList) { char *taskPrefix = SourceShardPrefix(resultIdPrefix, selectTask->anchorShardId); char *partitionMethodString = targetRelation->partitionMethod == 'h' ? @@ -490,7 +490,7 @@ ColocateFragmentsWithRelation(List *fragmentList, CitusTableCacheEntry *targetRe List **shardResultIdList = palloc0(shardCount * sizeof(List *)); DistributedResultFragment *sourceFragment = NULL; - foreach_ptr(sourceFragment, fragmentList) + foreach_declared_ptr(sourceFragment, fragmentList) { int shardIndex = sourceFragment->targetShardIndex; @@ -520,11 +520,11 @@ ColocationTransfers(List *fragmentList, CitusTableCacheEntry *targetRelation) HASH_ELEM | HASH_CONTEXT | HASH_BLOBS); DistributedResultFragment *fragment = NULL; - foreach_ptr(fragment, fragmentList) + foreach_declared_ptr(fragment, fragmentList) { List *placementList = ActiveShardPlacementList(fragment->targetShardId); ShardPlacement *placement = NULL; - foreach_ptr(placement, placementList) + foreach_declared_ptr(placement, placementList) { NodePair transferKey = { .sourceNodeId = fragment->nodeId, @@ -576,7 +576,7 @@ FragmentTransferTaskList(List *fragmentListTransfers) List *fetchTaskList = NIL; NodeToNodeFragmentsTransfer *fragmentsTransfer = NULL; - foreach_ptr(fragmentsTransfer, fragmentListTransfers) + foreach_declared_ptr(fragmentsTransfer, fragmentListTransfers) { uint32 targetNodeId = fragmentsTransfer->nodes.targetNodeId; @@ -629,7 +629,7 @@ QueryStringForFragmentsTransfer(NodeToNodeFragmentsTransfer *fragmentsTransfer) appendStringInfoString(fragmentNamesArrayString, "ARRAY["); DistributedResultFragment *fragment = NULL; - foreach_ptr(fragment, fragmentsTransfer->fragmentList) + foreach_declared_ptr(fragment, fragmentsTransfer->fragmentList) { const char *fragmentName = fragment->resultId; diff --git a/src/backend/distributed/executor/executor_util_tasks.c b/src/backend/distributed/executor/executor_util_tasks.c index 6a3eec8fc52..82a153b94f1 100644 --- a/src/backend/distributed/executor/executor_util_tasks.c +++ b/src/backend/distributed/executor/executor_util_tasks.c @@ -163,7 +163,7 @@ bool TaskListCannotBeExecutedInTransaction(List 
*taskList) { Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { if (task->cannotBeExecutedInTransaction) { @@ -190,7 +190,7 @@ SelectForUpdateOnReferenceTable(List *taskList) Task *task = (Task *) linitial(taskList); RelationRowLock *relationRowLock = NULL; - foreach_ptr(relationRowLock, task->relationRowLockList) + foreach_declared_ptr(relationRowLock, task->relationRowLockList) { Oid relationId = relationRowLock->relationId; @@ -239,7 +239,7 @@ bool ModifiedTableReplicated(List *taskList) { Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { int64 shardId = task->anchorShardId; diff --git a/src/backend/distributed/executor/insert_select_executor.c b/src/backend/distributed/executor/insert_select_executor.c index a8dc1fa5a44..76dde345f55 100644 --- a/src/backend/distributed/executor/insert_select_executor.c +++ b/src/backend/distributed/executor/insert_select_executor.c @@ -239,7 +239,7 @@ NonPushableInsertSelectExecScan(CustomScanState *node) * on shards with connections. */ Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { uint64 shardId = task->anchorShardId; bool shardModified = false; @@ -376,7 +376,7 @@ BuildColumnNameListFromTargetList(Oid targetRelationId, List *insertTargetList) /* build the list of column names for the COPY statement */ TargetEntry *insertTargetEntry = NULL; - foreach_ptr(insertTargetEntry, insertTargetList) + foreach_declared_ptr(insertTargetEntry, insertTargetList) { columnNameList = lappend(columnNameList, insertTargetEntry->resname); } @@ -397,7 +397,7 @@ PartitionColumnIndexFromColumnList(Oid relationId, List *columnNameList) int partitionColumnIndex = 0; const char *columnName = NULL; - foreach_ptr(columnName, columnNameList) + foreach_declared_ptr(columnName, columnNameList) { AttrNumber attrNumber = get_attnum(relationId, columnName); @@ -423,7 +423,7 @@ DistributionColumnIndex(List *insertTargetList, Var *distributionColumn) { TargetEntry *insertTargetEntry = NULL; int targetEntryIndex = 0; - foreach_ptr(insertTargetEntry, insertTargetList) + foreach_declared_ptr(insertTargetEntry, insertTargetList) { if (insertTargetEntry->resno == distributionColumn->varattno) { @@ -447,7 +447,7 @@ WrapTaskListForProjection(List *taskList, List *projectedTargetEntries) StringInfo projectedColumnsString = makeStringInfo(); int entryIndex = 0; TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, projectedTargetEntries) + foreach_declared_ptr(targetEntry, projectedTargetEntries) { if (entryIndex != 0) { @@ -462,7 +462,7 @@ WrapTaskListForProjection(List *taskList, List *projectedTargetEntries) } Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { StringInfo wrappedQuery = makeStringInfo(); appendStringInfo(wrappedQuery, "SELECT %s FROM (%s) subquery", diff --git a/src/backend/distributed/executor/intermediate_results.c b/src/backend/distributed/executor/intermediate_results.c index daf707b2462..ee1aafad2db 100644 --- a/src/backend/distributed/executor/intermediate_results.c +++ b/src/backend/distributed/executor/intermediate_results.c @@ -306,7 +306,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) } WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, initialNodeList) + foreach_declared_ptr(workerNode, initialNodeList) { int flags = 0; @@ -326,7 +326,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) RemoteTransactionsBeginIfNecessary(connectionList); 
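Note on the BuildWaitEventSet() hunk above: PostgreSQL 17 changed CreateWaitEventSet() to take a ResourceOwner rather than a MemoryContext (wait event sets are now tracked by resource owners), which is why the call site switches its first argument to the WaitEventSetTracker_compat macro. The shim below is only a plausible shape for that macro, not Citus' actual definition; the header placement and the choice of CurrentResourceOwner are assumptions.

    /*
     * Hypothetical version-gated shim in the spirit of WaitEventSetTracker_compat.
     * Citus keeps this kind of alias in a compat header; the real definition may
     * differ (it could, for example, pass NULL to opt out of resource-owner
     * tracking on PG17).
     */
    #if PG_VERSION_NUM >= 170000
    #define WaitEventSetTracker_compat CurrentResourceOwner
    #else
    #define WaitEventSetTracker_compat CurrentMemoryContext
    #endif

    /* the executor call then reads the same on every supported major: */
    WaitEventSet *waitEventSet =
        CreateWaitEventSet(WaitEventSetTracker_compat, eventSetSize);

Keeping the version test inside one macro means none of the wait-event call sites need their own #if blocks.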
MultiConnection *connection = NULL; - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { StringInfo copyCommand = ConstructCopyResultStatement(resultId); @@ -337,7 +337,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) } } - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { bool raiseInterrupts = true; @@ -516,7 +516,7 @@ static void BroadcastCopyData(StringInfo dataBuffer, List *connectionList) { MultiConnection *connection = NULL; - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { SendCopyDataOverConnection(dataBuffer, connection); } @@ -712,7 +712,7 @@ void RemoveIntermediateResultsDirectories(void) { char *directoryElement = NULL; - foreach_ptr(directoryElement, CreatedResultsDirectories) + foreach_declared_ptr(directoryElement, CreatedResultsDirectories) { /* * The shared directory is renamed before deleting it. Otherwise it diff --git a/src/backend/distributed/executor/local_executor.c b/src/backend/distributed/executor/local_executor.c index bedaa643e29..d824d8f3152 100644 --- a/src/backend/distributed/executor/local_executor.c +++ b/src/backend/distributed/executor/local_executor.c @@ -253,7 +253,7 @@ ExecuteLocalTaskListExtended(List *taskList, ALLOCSET_DEFAULT_SIZES); Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { MemoryContext oldContext = MemoryContextSwitchTo(loopContext); @@ -304,7 +304,7 @@ ExecuteLocalTaskListExtended(List *taskList, LOCKMODE lockMode = GetQueryLockMode(jobQuery); Oid relationId = InvalidOid; - foreach_oid(relationId, localPlan->relationOids) + foreach_declared_oid(relationId, localPlan->relationOids) { LockRelationOid(relationId, lockMode); } @@ -393,7 +393,7 @@ SetColocationIdAndPartitionKeyValueForTasks(List *taskList, Job *workerJob) if (workerJob->colocationId != INVALID_COLOCATION_ID) { Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { task->colocationId = workerJob->colocationId; task->partitionKeyValue = workerJob->partitionKeyValue; @@ -412,7 +412,7 @@ LocallyPlanAndExecuteMultipleQueries(List *queryStrings, TupleDestination *tuple { char *queryString = NULL; uint64 totalProcessedRows = 0; - foreach_ptr(queryString, queryStrings) + foreach_declared_ptr(queryString, queryStrings) { Query *shardQuery = ParseQueryString(queryString, NULL, @@ -490,7 +490,7 @@ ExecuteUtilityCommand(const char *taskQueryCommand) List *parseTreeList = pg_parse_query(taskQueryCommand); RawStmt *taskRawStmt = NULL; - foreach_ptr(taskRawStmt, parseTreeList) + foreach_declared_ptr(taskRawStmt, parseTreeList) { Node *taskRawParseTree = taskRawStmt->stmt; @@ -580,7 +580,7 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList, *localTaskList = NIL; Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { List *localTaskPlacementList = NULL; List *remoteTaskPlacementList = NULL; @@ -645,7 +645,7 @@ SplitLocalAndRemotePlacements(List *taskPlacementList, List **localTaskPlacement *remoteTaskPlacementList = NIL; ShardPlacement *taskPlacement = NULL; - foreach_ptr(taskPlacement, taskPlacementList) + foreach_declared_ptr(taskPlacement, taskPlacementList) { if (taskPlacement->groupId == localGroupId) { @@ -817,7 +817,7 @@ RecordNonDistTableAccessesForTask(Task *task) List *placementAccessList = PlacementAccessListForTask(task, taskPlacement); ShardPlacementAccess *placementAccess = NULL; - 
foreach_ptr(placementAccess, placementAccessList) + foreach_declared_ptr(placementAccess, placementAccessList) { uint64 placementAccessShardId = placementAccess->placement->shardId; if (placementAccessShardId == INVALID_SHARD_ID) @@ -968,7 +968,7 @@ AnyTaskAccessesLocalNode(List *taskList) { Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { if (TaskAccessesLocalNode(task)) { @@ -990,7 +990,7 @@ TaskAccessesLocalNode(Task *task) int32 localGroupId = GetLocalGroupId(); ShardPlacement *taskPlacement = NULL; - foreach_ptr(taskPlacement, task->taskPlacementList) + foreach_declared_ptr(taskPlacement, task->taskPlacementList) { if (taskPlacement->groupId == localGroupId) { diff --git a/src/backend/distributed/executor/merge_executor.c b/src/backend/distributed/executor/merge_executor.c index 969b03faf93..ce1eb007318 100644 --- a/src/backend/distributed/executor/merge_executor.c +++ b/src/backend/distributed/executor/merge_executor.c @@ -258,7 +258,7 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState) * on shards with connections. */ Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { uint64 shardId = task->anchorShardId; bool shardModified = false; diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index 386a278b4c7..e257b80c6be 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -224,7 +224,7 @@ CitusExecutorRun(QueryDesc *queryDesc, */ List *citusCustomScanStates = FindCitusCustomScanStates(queryDesc->planstate); CitusScanState *citusScanState = NULL; - foreach_ptr(citusScanState, citusCustomScanStates) + foreach_declared_ptr(citusScanState, citusCustomScanStates) { if (citusScanState->PreExecScan) { @@ -512,7 +512,7 @@ SortTupleStore(CitusScanState *scanState) * for sorting the tuples. 
*/ TargetEntry *returningEntry = NULL; - foreach_ptr(returningEntry, targetList) + foreach_declared_ptr(returningEntry, targetList) { Oid sortop = InvalidOid; diff --git a/src/backend/distributed/executor/placement_access.c b/src/backend/distributed/executor/placement_access.c index a8573de7c08..1046ae53972 100644 --- a/src/backend/distributed/executor/placement_access.c +++ b/src/backend/distributed/executor/placement_access.c @@ -126,7 +126,7 @@ BuildPlacementAccessList(int32 groupId, List *relationShardList, List *placementAccessList = NIL; RelationShard *relationShard = NULL; - foreach_ptr(relationShard, relationShardList) + foreach_declared_ptr(relationShard, relationShardList) { ShardPlacement *placement = ActiveShardPlacementOnGroup(groupId, relationShard->shardId); diff --git a/src/backend/distributed/executor/query_stats.c b/src/backend/distributed/executor/query_stats.c index f37a99bbf72..ce6179b9617 100644 --- a/src/backend/distributed/executor/query_stats.c +++ b/src/backend/distributed/executor/query_stats.c @@ -759,9 +759,6 @@ citus_query_stats(PG_FUNCTION_ARGS) LWLockRelease(queryStats->lock); - /* clean up and return the tuplestore */ - tuplestore_donestoring(tupstore); - return (Datum) 0; } diff --git a/src/backend/distributed/executor/repartition_join_execution.c b/src/backend/distributed/executor/repartition_join_execution.c index 8dce1239034..d72e030b5f4 100644 --- a/src/backend/distributed/executor/repartition_join_execution.c +++ b/src/backend/distributed/executor/repartition_join_execution.c @@ -93,7 +93,7 @@ TraverseJobTree(Job *curJob, List **jobIds) *jobIds = lappend(*jobIds, jobIdPointer); Job *childJob = NULL; - foreach_ptr(childJob, curJob->dependentJobList) + foreach_declared_ptr(childJob, curJob->dependentJobList) { TraverseJobTree(childJob, jobIds); } diff --git a/src/backend/distributed/executor/subplan_execution.c b/src/backend/distributed/executor/subplan_execution.c index 4e81bb48680..ef283834339 100644 --- a/src/backend/distributed/executor/subplan_execution.c +++ b/src/backend/distributed/executor/subplan_execution.c @@ -59,7 +59,7 @@ ExecuteSubPlans(DistributedPlan *distributedPlan) UseCoordinatedTransaction(); DistributedSubPlan *subPlan = NULL; - foreach_ptr(subPlan, subPlanList) + foreach_declared_ptr(subPlan, subPlanList) { PlannedStmt *plannedStmt = subPlan->plan; uint32 subPlanId = subPlan->subPlanId; diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 01653721ec3..2569b58fca8 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -207,7 +207,7 @@ GetUniqueDependenciesList(List *objectAddressesList) InitObjectAddressCollector(&objectAddressCollector); ObjectAddress *objectAddress = NULL; - foreach_ptr(objectAddress, objectAddressesList) + foreach_declared_ptr(objectAddress, objectAddressesList) { if (IsObjectAddressCollected(*objectAddress, &objectAddressCollector)) { @@ -334,7 +334,7 @@ OrderObjectAddressListInDependencyOrder(List *objectAddressList) InitObjectAddressCollector(&collector); ObjectAddress *objectAddress = NULL; - foreach_ptr(objectAddress, objectAddressList) + foreach_declared_ptr(objectAddress, objectAddressList) { if (IsObjectAddressCollected(*objectAddress, &collector)) { @@ -403,7 +403,7 @@ RecurseObjectDependencies(ObjectAddress target, expandFn expand, followFn follow /* iterate all entries and recurse depth first */ DependencyDefinition *dependencyDefinition = NULL; - foreach_ptr(dependencyDefinition, 
dependenyDefinitionList) + foreach_declared_ptr(dependencyDefinition, dependenyDefinitionList) { if (follow == NULL || !follow(collector, dependencyDefinition)) { @@ -869,7 +869,7 @@ bool ErrorOrWarnIfAnyObjectHasUnsupportedDependency(List *objectAddresses) { ObjectAddress *objectAddress = NULL; - foreach_ptr(objectAddress, objectAddresses) + foreach_declared_ptr(objectAddress, objectAddresses) { if (ErrorOrWarnIfObjectHasUnsupportedDependency(objectAddress)) { @@ -962,7 +962,7 @@ DeferErrorIfAnyObjectHasUnsupportedDependency(const List *objectAddresses) { DeferredErrorMessage *deferredErrorMessage = NULL; ObjectAddress *objectAddress = NULL; - foreach_ptr(objectAddress, objectAddresses) + foreach_declared_ptr(objectAddress, objectAddresses) { deferredErrorMessage = DeferErrorIfHasUnsupportedDependency(objectAddress); if (deferredErrorMessage) @@ -995,7 +995,7 @@ GetUndistributableDependency(const ObjectAddress *objectAddress) return NULL; } - foreach_ptr(dependency, dependencies) + foreach_declared_ptr(dependency, dependencies) { /* * Objects with the id smaller than FirstNormalObjectId should be created within @@ -1172,7 +1172,7 @@ IsAnyObjectAddressOwnedByExtension(const List *targets, ObjectAddress *extensionAddress) { ObjectAddress *target = NULL; - foreach_ptr(target, targets) + foreach_declared_ptr(target, targets) { if (IsObjectAddressOwnedByExtension(target, extensionAddress)) { @@ -1563,7 +1563,7 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe List *FDWOids = GetDependentFDWsToExtension(extensionId); Oid FDWOid = InvalidOid; - foreach_oid(FDWOid, FDWOids) + foreach_declared_oid(FDWOid, FDWOids) { List *dependentRoleIds = GetDependentRoleIdsFDW(FDWOid); List *dependencies = @@ -1849,7 +1849,7 @@ GetViewRuleReferenceDependencyList(Oid viewId) List *nonInternalDependenciesOfDependingRules = NIL; HeapTuple depTup = NULL; - foreach_ptr(depTup, dependencyTupleList) + foreach_declared_ptr(depTup, dependencyTupleList) { Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup); @@ -1872,7 +1872,7 @@ GetViewRuleReferenceDependencyList(Oid viewId) List *ruleDependencies = DependencyDefinitionFromPgDepend(ruleAddress); DependencyDefinition *dependencyDef = NULL; - foreach_ptr(dependencyDef, ruleDependencies) + foreach_declared_ptr(dependencyDef, ruleDependencies) { /* * Follow all dependencies of the internally dependent rule dependencies @@ -1907,7 +1907,7 @@ GetRelationSequenceDependencyList(Oid relationId) List *seqIdList = NIL; SequenceInfo *seqInfo = NULL; - foreach_ptr(seqInfo, seqInfoList) + foreach_declared_ptr(seqInfo, seqInfoList) { seqIdList = lappend_oid(seqIdList, seqInfo->sequenceOid); } @@ -1980,7 +1980,7 @@ GetRelationTriggerFunctionDependencyList(Oid relationId) List *triggerIdList = GetExplicitTriggerIdList(relationId); Oid triggerId = InvalidOid; - foreach_oid(triggerId, triggerIdList) + foreach_declared_oid(triggerId, triggerIdList) { Oid functionId = GetTriggerFunctionId(triggerId); DependencyDefinition *dependency = @@ -2005,7 +2005,7 @@ GetPublicationRelationsDependencyList(Oid publicationId) Oid relationId = InvalidOid; - foreach_oid(relationId, allRelationIds) + foreach_declared_oid(relationId, allRelationIds) { if (!IsCitusTable(relationId)) { @@ -2087,7 +2087,7 @@ CreateObjectAddressDependencyDefList(Oid classId, List *objectIdList) { List *dependencyList = NIL; Oid objectId = InvalidOid; - foreach_oid(objectId, objectIdList) + foreach_declared_oid(objectId, objectIdList) { DependencyDefinition *dependency = 
CreateObjectAddressDependencyDef(classId, objectId); @@ -2161,7 +2161,7 @@ BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap) targetObjectId); HeapTuple depTup = NULL; - foreach_ptr(depTup, dependencyTupleList) + foreach_declared_ptr(depTup, dependencyTupleList) { Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup); @@ -2240,7 +2240,7 @@ GetDependingViews(Oid relationId) foreach_ptr_append(node, nodeQueue) { ViewDependencyNode *dependingNode = NULL; - foreach_ptr(dependingNode, node->dependingNodes) + foreach_declared_ptr(dependingNode, node->dependingNodes) { ObjectAddress relationAddress = { 0 }; ObjectAddressSet(relationAddress, RelationRelationId, dependingNode->id); diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index ff5b2c7a954..daa51eb7576 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -554,7 +554,7 @@ bool IsAnyObjectDistributed(const List *addresses) { ObjectAddress *address = NULL; - foreach_ptr(address, addresses) + foreach_declared_ptr(address, addresses) { if (IsObjectDistributed(address)) { diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 402dedb8a91..440e4d7c471 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -920,7 +920,7 @@ CitusTableList(void) List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); Oid relationId = InvalidOid; - foreach_oid(relationId, citusTableIdList) + foreach_declared_oid(relationId, citusTableIdList) { CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId); @@ -1891,7 +1891,7 @@ BuildCachedShardList(CitusTableCacheEntry *cacheEntry) sizeof(int)); HeapTuple shardTuple = NULL; - foreach_ptr(shardTuple, distShardTupleList) + foreach_declared_ptr(shardTuple, distShardTupleList) { ShardInterval *shardInterval = TupleToShardInterval(shardTuple, distShardTupleDesc, @@ -2029,7 +2029,7 @@ BuildCachedShardList(CitusTableCacheEntry *cacheEntry) GroupShardPlacement *placementArray = palloc0(numberOfPlacements * sizeof(GroupShardPlacement)); GroupShardPlacement *srcPlacement = NULL; - foreach_ptr(srcPlacement, placementList) + foreach_declared_ptr(srcPlacement, placementList) { placementArray[placementOffset] = *srcPlacement; placementOffset++; @@ -4333,7 +4333,7 @@ InitializeWorkerNodeCache(void) /* iterate over the worker node list */ WorkerNode *currentNode = NULL; - foreach_ptr(currentNode, workerNodeList) + foreach_declared_ptr(currentNode, workerNodeList) { bool handleFound = false; @@ -4510,7 +4510,7 @@ GetLocalNodeId(void) List *workerNodeList = ReadDistNode(includeNodesFromOtherClusters); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { if (workerNode->groupId == localGroupId && workerNode->isActive) @@ -5098,7 +5098,7 @@ CitusTableCacheFlushInvalidatedEntries() if (DistTableCacheHash != NULL && DistTableCacheExpired != NIL) { CitusTableCacheEntry *cacheEntry = NULL; - foreach_ptr(cacheEntry, DistTableCacheExpired) + foreach_declared_ptr(cacheEntry, DistTableCacheExpired) { ResetCitusTableCacheEntry(cacheEntry); } diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index ef7c56dc752..0da56eb9e56 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ 
b/src/backend/distributed/metadata/metadata_sync.c @@ -307,7 +307,7 @@ CreateDependingViewsOnWorkers(Oid relationId) SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); Oid viewOid = InvalidOid; - foreach_oid(viewOid, views) + foreach_declared_oid(viewOid, views) { if (!ShouldMarkRelationDistributed(viewOid)) { @@ -347,7 +347,7 @@ AddTableToPublications(Oid relationId) SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); - foreach_oid(publicationId, publicationIds) + foreach_declared_oid(publicationId, publicationIds) { ObjectAddress *publicationAddress = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*publicationAddress, PublicationRelationId, publicationId); @@ -818,7 +818,7 @@ NodeListInsertCommand(List *workerNodeList) /* iterate over the worker nodes, add the values */ WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { char *hasMetadataString = workerNode->hasMetadata ? "TRUE" : "FALSE"; char *metadataSyncedString = workerNode->metadataSynced ? "TRUE" : "FALSE"; @@ -946,7 +946,7 @@ MarkObjectsDistributedCreateCommand(List *addresses, char *name = NULL; bool firstInNameLoop = true; - foreach_ptr(name, names) + foreach_declared_ptr(name, names) { if (!firstInNameLoop) { @@ -961,7 +961,7 @@ MarkObjectsDistributedCreateCommand(List *addresses, char *arg; bool firstInArgLoop = true; - foreach_ptr(arg, args) + foreach_declared_ptr(arg, args) { if (!firstInArgLoop) { @@ -1217,13 +1217,13 @@ ShardListInsertCommand(List *shardIntervalList) ShardInterval *shardInterval = NULL; bool firstPlacementProcessed = false; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; List *shardPlacementList = ActiveShardPlacementList(shardId); ShardPlacement *placement = NULL; - foreach_ptr(placement, shardPlacementList) + foreach_declared_ptr(placement, shardPlacementList) { if (firstPlacementProcessed) { @@ -1257,7 +1257,7 @@ ShardListInsertCommand(List *shardIntervalList) "WITH shard_data(relationname, shardid, storagetype, " "shardminvalue, shardmaxvalue) AS (VALUES "); - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; Oid distributedRelationId = shardInterval->relationId; @@ -1694,7 +1694,7 @@ GetDependentRelationsWithSequence(Oid sequenceOid, char depType) Oid attrDefOid; List *attrDefOids = GetAttrDefsFromSequence(sequenceOid); - foreach_oid(attrDefOid, attrDefOids) + foreach_declared_oid(attrDefOid, attrDefOids) { ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid); relations = lappend_oid(relations, columnAddress.objectId); @@ -1890,7 +1890,7 @@ GetDependentFunctionsWithRelation(Oid relationId) table_close(depRel, AccessShareLock); ObjectAddress *referencingObject = NULL; - foreach_ptr(referencingObject, referencingObjects) + foreach_declared_ptr(referencingObject, referencingObjects) { functionOids = list_concat(functionOids, GetFunctionDependenciesForObjects(referencingObject)); @@ -2771,7 +2771,7 @@ HasMetadataWorkers(void) List *workerNodeList = ActiveReadableNonCoordinatorNodeList(); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { if (workerNode->hasMetadata) { @@ -2804,7 +2804,7 @@ CreateInterTableRelationshipOfRelationOnWorkers(Oid relationId) SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); const char *command = 
NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { SendCommandToWorkersWithMetadata(command); } @@ -2857,14 +2857,14 @@ CreateShellTableOnWorkers(Oid relationId) creatingShellTableOnRemoteNode); TableDDLCommand *tableDDLCommand = NULL; - foreach_ptr(tableDDLCommand, tableDDLCommands) + foreach_declared_ptr(tableDDLCommand, tableDDLCommands) { Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); commandList = lappend(commandList, GetTableDDLCommand(tableDDLCommand)); } const char *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { SendCommandToWorkersWithMetadata(command); } @@ -2888,7 +2888,7 @@ CreateTableMetadataOnWorkers(Oid relationId) /* send the commands one by one */ const char *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { SendCommandToWorkersWithMetadata(command); } @@ -2912,7 +2912,7 @@ DetachPartitionCommandList(void) /* we iterate over all distributed partitioned tables and DETACH their partitions */ CitusTableCacheEntry *cacheEntry = NULL; - foreach_ptr(cacheEntry, distributedTableList) + foreach_declared_ptr(cacheEntry, distributedTableList) { if (!PartitionedTable(cacheEntry->relationId)) { @@ -2976,7 +2976,7 @@ SyncNodeMetadataToNodesOptional(void) List *syncedWorkerList = NIL; List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerList) + foreach_declared_ptr(workerNode, workerList) { if (workerNode->hasMetadata && !workerNode->metadataSynced) { @@ -2996,7 +2996,7 @@ SyncNodeMetadataToNodesOptional(void) } } - foreach_ptr(workerNode, syncedWorkerList) + foreach_declared_ptr(workerNode, syncedWorkerList) { SetWorkerColumnOptional(workerNode, Anum_pg_dist_node_metadatasynced, BoolGetDatum(true)); @@ -3041,7 +3041,7 @@ SyncNodeMetadataToNodes(void) List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerList) + foreach_declared_ptr(workerNode, workerList) { if (workerNode->hasMetadata) { @@ -3280,7 +3280,7 @@ ShouldInitiateMetadataSync(bool *lockFailure) List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerList) + foreach_declared_ptr(workerNode, workerList) { if (workerNode->hasMetadata && !workerNode->metadataSynced) { @@ -3638,7 +3638,7 @@ EnsureShardMetadataIsSane(Oid relationId, int64 shardId, char storageType, GetFunctionInfo(intervalTypeId, BTREE_AM_OID, BTORDER_PROC); HeapTuple shardTuple = NULL; - foreach_ptr(shardTuple, distShardTupleList) + foreach_declared_ptr(shardTuple, distShardTupleList) { ShardInterval *shardInterval = TupleToShardInterval(shardTuple, distShardTupleDesc, @@ -3934,7 +3934,7 @@ citus_internal_delete_shard_metadata(PG_FUNCTION_ARGS) List *shardPlacementList = ShardPlacementList(shardId); ShardPlacement *shardPlacement = NULL; - foreach_ptr(shardPlacement, shardPlacementList) + foreach_declared_ptr(shardPlacement, shardPlacementList) { DeleteShardPlacementRow(shardPlacement->placementId); } @@ -4503,7 +4503,7 @@ SetMetadataSyncNodesFromNodeList(MetadataSyncContext *context, List *nodeList) List *activatedWorkerNodeList = NIL; WorkerNode *node = NULL; - foreach_ptr(node, nodeList) + foreach_declared_ptr(node, nodeList) { if (NodeIsPrimary(node)) { @@ -4538,7 +4538,7 @@ EstablishAndSetMetadataSyncBareConnections(MetadataSyncContext *context) /* establish bare connections to activated worker nodes */ 
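Note on the rename pattern that makes up most of this diff: PostgreSQL 17 adds its own foreach_ptr/foreach_oid/foreach_int macros to pg_list.h, and those declare the loop variable inside the macro. Citus' existing macros of the same names expect the caller to declare the variable first, so the patch renames them to foreach_declared_ptr/foreach_declared_oid/foreach_declared_int to avoid the collision while leaving call sites otherwise unchanged. The sketch below shows the caller-declared style purely for illustration; it is not the exact definition in listutils.h, and the cell-variable naming is an assumption.

    #include "postgres.h"
    #include "nodes/pg_list.h"

    /*
     * Illustrative sketch of a caller-declared iteration macro in the style of
     * Citus' foreach_declared_ptr (the real definition lives in
     * src/include/distributed/listutils.h and may differ in detail).
     */
    #define foreach_declared_ptr(var, lst) \
        for (ListCell *var##Cell = list_head(lst); \
             var##Cell != NULL && (((var) = lfirst(var##Cell)) || true); \
             var##Cell = lnext(lst, var##Cell))

    /* usage matches the hunks above: the caller declares the variable first */
    static void
    VisitNames(List *nameList)
    {
        char *name = NULL;
        foreach_declared_ptr(name, nameList)
        {
            elog(DEBUG1, "visiting %s", name);
        }
    }

    /*
     * PostgreSQL 17's builtin foreach_ptr instead takes the element type and
     * declares the variable itself, roughly foreach_ptr(char, name, nameList),
     * which is why keeping the old Citus name would clash on PG17.
     */

Because the cell variable is derived from the loop variable's name, nested iterations over the same list work as long as each loop uses a distinct variable; the comment updated in the health_check.c hunk later in this diff relies on exactly that property.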
List *bareConnectionList = NIL; WorkerNode *node = NULL; - foreach_ptr(node, context->activatedWorkerNodeList) + foreach_declared_ptr(node, context->activatedWorkerNodeList) { MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlags, node->workerName, @@ -5147,7 +5147,7 @@ SendDependencyCreationCommands(MetadataSyncContext *context) ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(commandsContext); ObjectAddress *dependency = NULL; - foreach_ptr(dependency, dependencies) + foreach_declared_ptr(dependency, dependencies) { if (!MetadataSyncCollectsCommands(context)) { diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index 15e167008dd..fad263abd47 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -420,7 +420,7 @@ OpenConnectionToNodes(List *workerNodeList) { List *connectionList = NIL; WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { const char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; @@ -444,7 +444,7 @@ GenerateShardStatisticsQueryList(List *workerNodeList, List *citusTableIds) { List *shardStatisticsQueryList = NIL; WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { char *shardStatisticsQuery = GenerateAllShardStatisticsQueryForNode(workerNode, citusTableIds); @@ -465,7 +465,7 @@ ReceiveShardIdAndSizeResults(List *connectionList, Tuplestorestate *tupleStore, TupleDesc tupleDescriptor) { MultiConnection *connection = NULL; - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { bool raiseInterrupts = true; Datum values[SHARD_SIZES_COLUMN_COUNT]; @@ -559,7 +559,7 @@ DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType, List *workerNodeList = ActiveReadableNodeList(); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { uint64 relationSizeOnNode = 0; @@ -780,7 +780,7 @@ GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList, List *nonPartitionedShardNames = NIL; ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { if (optimizePartitionCalculations && PartitionTable(shardInterval->relationId)) { @@ -859,7 +859,7 @@ GenerateSizeQueryForRelationNameList(List *quotedShardNames, char *sizeFunction) bool addComma = false; char *quotedShardName = NULL; - foreach_ptr(quotedShardName, quotedShardNames) + foreach_declared_ptr(quotedShardName, quotedShardNames) { if (addComma) { @@ -960,7 +960,7 @@ GenerateAllShardStatisticsQueryForNode(WorkerNode *workerNode, List *citusTableI appendStringInfoString(allShardStatisticsQuery, " FROM (VALUES "); Oid relationId = InvalidOid; - foreach_oid(relationId, citusTableIds) + foreach_declared_oid(relationId, citusTableIds) { /* * Ensure the table still exists by trying to acquire a lock on it @@ -1007,7 +1007,7 @@ GenerateShardIdNameValuesForShardList(List *shardIntervalList, bool firstValue) StringInfo selectQuery = makeStringInfo(); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { if (!firstValue) { @@ -1147,7 +1147,7 @@ TableShardReplicationFactor(Oid relationId) List *shardIntervalList = 
LoadShardIntervalList(relationId); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; @@ -1238,7 +1238,7 @@ LoadUnsortedShardIntervalListViaCatalog(Oid relationId) &intervalTypeMod); HeapTuple distShardTuple = NULL; - foreach_ptr(distShardTuple, distShardTuples) + foreach_declared_ptr(distShardTuple, distShardTuples) { ShardInterval *interval = TupleToShardInterval(distShardTuple, distShardTupleDesc, @@ -1487,7 +1487,7 @@ FilterShardPlacementList(List *shardPlacementList, bool (*filter)(ShardPlacement List *filteredShardPlacementList = NIL; ShardPlacement *shardPlacement = NULL; - foreach_ptr(shardPlacement, shardPlacementList) + foreach_declared_ptr(shardPlacement, shardPlacementList) { if (filter(shardPlacement)) { @@ -1511,7 +1511,7 @@ FilterActiveShardPlacementListByNode(List *shardPlacementList, WorkerNode *worke List *filteredShardPlacementList = NIL; ShardPlacement *shardPlacement = NULL; - foreach_ptr(shardPlacement, activeShardPlacementList) + foreach_declared_ptr(shardPlacement, activeShardPlacementList) { if (IsPlacementOnWorkerNode(shardPlacement, workerNode)) { @@ -1535,7 +1535,7 @@ ActiveShardPlacementListOnGroup(uint64 shardId, int32 groupId) List *activePlacementList = ActiveShardPlacementList(shardId); ShardPlacement *shardPlacement = NULL; - foreach_ptr(shardPlacement, activePlacementList) + foreach_declared_ptr(shardPlacement, activePlacementList) { if (shardPlacement->groupId == groupId) { @@ -3331,7 +3331,7 @@ ResetRunningBackgroundTasks(void) /* there are tasks that need to release their lock before we can continue */ int64 *taskId = NULL; - foreach_ptr(taskId, taskIdsToWait) + foreach_declared_ptr(taskId, taskIdsToWait) { LOCKTAG locktag = { 0 }; SET_LOCKTAG_BACKGROUND_TASK(locktag, *taskId); diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index d93b133eaed..1fa268a7306 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -987,7 +987,7 @@ MarkNodesNotSyncedInLoopBackConnection(MetadataSyncContext *context, List *commandList = NIL; WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, context->activatedWorkerNodeList) + foreach_declared_ptr(workerNode, context->activatedWorkerNodeList) { /* * We need to prevent self deadlock when we access pg_dist_node using separate @@ -1020,7 +1020,7 @@ SetNodeMetadata(MetadataSyncContext *context, bool localOnly) List *updatedActivatedNodeList = NIL; WorkerNode *node = NULL; - foreach_ptr(node, context->activatedWorkerNodeList) + foreach_declared_ptr(node, context->activatedWorkerNodeList) { node = SetWorkerColumnLocalOnly(node, Anum_pg_dist_node_isactive, BoolGetDatum(true)); @@ -1039,7 +1039,7 @@ SetNodeMetadata(MetadataSyncContext *context, bool localOnly) if (!localOnly && EnableMetadataSync) { WorkerNode *node = NULL; - foreach_ptr(node, context->activatedWorkerNodeList) + foreach_declared_ptr(node, context->activatedWorkerNodeList) { SetNodeStateViaMetadataContext(context, node, BoolGetDatum(true)); } @@ -1844,7 +1844,7 @@ FindNodeAnyClusterByNodeId(uint32 nodeId) List *nodeList = ReadDistNode(includeNodesFromOtherClusters); WorkerNode *node = NULL; - foreach_ptr(node, nodeList) + foreach_declared_ptr(node, nodeList) { if (node->nodeId == nodeId) { @@ -1866,7 +1866,7 @@ FindNodeWithNodeId(int nodeId, bool missingOk) List *nodeList = ActiveReadableNodeList(); 
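One more PG17-related cleanup worth calling out: the query_stats.c hunk earlier in this diff deletes the tuplestore_donestoring(tupstore) call from citus_query_stats(). That macro has been a no-op for many releases and is removed in recent PostgreSQL versions, which is presumably why the call is simply dropped; rows are already in the tuplestore once tuplestore_putvalues() has run. The skeleton below only illustrates the remaining pattern; the function name, the single column, and the use of the PG15+ InitMaterializedSRF() convenience are illustrative and not taken from the patch.

    #include "postgres.h"
    #include "fmgr.h"
    #include "funcapi.h"
    #include "utils/tuplestore.h"

    PG_FUNCTION_INFO_V1(example_stats);

    /*
     * Illustrative set-returning function (not from the patch): materialize one
     * row into the executor-provided tuplestore and return.
     */
    Datum
    example_stats(PG_FUNCTION_ARGS)
    {
        ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;

        /* PG15+ helper that sets up rsinfo->setResult and rsinfo->setDesc */
        InitMaterializedSRF(fcinfo, 0);

        Datum values[1] = { Int64GetDatum(42) };
        bool isNulls[1] = { false };

        tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, isNulls);

        /* no tuplestore_donestoring() here: it was a no-op and is gone in PG17 */
        return (Datum) 0;
    }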
WorkerNode *node = NULL; - foreach_ptr(node, nodeList) + foreach_declared_ptr(node, nodeList) { if (node->nodeId == nodeId) { @@ -1894,7 +1894,7 @@ FindCoordinatorNodeId() List *nodeList = ReadDistNode(includeNodesFromOtherClusters); WorkerNode *node = NULL; - foreach_ptr(node, nodeList) + foreach_declared_ptr(node, nodeList) { if (NodeIsCoordinator(node)) { @@ -2024,7 +2024,7 @@ ErrorIfNodeContainsNonRemovablePlacements(WorkerNode *workerNode) shardPlacements = SortList(shardPlacements, CompareGroupShardPlacements); GroupShardPlacement *placement = NULL; - foreach_ptr(placement, shardPlacements) + foreach_declared_ptr(placement, shardPlacements) { if (!PlacementHasActivePlacementOnAnotherGroup(placement)) { @@ -2060,7 +2060,7 @@ PlacementHasActivePlacementOnAnotherGroup(GroupShardPlacement *sourcePlacement) bool foundActivePlacementOnAnotherGroup = false; ShardPlacement *activePlacement = NULL; - foreach_ptr(activePlacement, activePlacementList) + foreach_declared_ptr(activePlacement, activePlacementList) { if (activePlacement->groupId != sourcePlacement->groupId) { @@ -2411,7 +2411,7 @@ SetWorkerColumnOptional(WorkerNode *workerNode, int columnIndex, Datum value) /* open connections in parallel */ WorkerNode *worker = NULL; - foreach_ptr(worker, workerNodeList) + foreach_declared_ptr(worker, workerNodeList) { bool success = SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction( worker->workerName, worker->workerPort, @@ -3144,7 +3144,7 @@ static void ErrorIfAnyNodeNotExist(List *nodeList) { WorkerNode *node = NULL; - foreach_ptr(node, nodeList) + foreach_declared_ptr(node, nodeList) { /* * First, locally mark the node is active, if everything goes well, @@ -3193,7 +3193,7 @@ static void SendDeletionCommandsForReplicatedTablePlacements(MetadataSyncContext *context) { WorkerNode *node = NULL; - foreach_ptr(node, context->activatedWorkerNodeList) + foreach_declared_ptr(node, context->activatedWorkerNodeList) { if (!node->isActive) { diff --git a/src/backend/distributed/operations/citus_create_restore_point.c b/src/backend/distributed/operations/citus_create_restore_point.c index 8a5e738e44b..18081b6e45b 100644 --- a/src/backend/distributed/operations/citus_create_restore_point.c +++ b/src/backend/distributed/operations/citus_create_restore_point.c @@ -122,7 +122,7 @@ OpenConnectionsToAllWorkerNodes(LOCKMODE lockMode) List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(lockMode); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { MultiConnection *connection = StartNodeConnection(connectionFlags, workerNode->workerName, @@ -164,7 +164,7 @@ CreateRemoteRestorePoints(char *restoreName, List *connectionList) const char *parameterValues[1] = { restoreName }; MultiConnection *connection = NULL; - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { int querySent = SendRemoteCommandParams(connection, CREATE_RESTORE_POINT_COMMAND, parameterCount, parameterTypes, @@ -175,7 +175,7 @@ CreateRemoteRestorePoints(char *restoreName, List *connectionList) } } - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { PGresult *result = GetRemoteCommandResult(connection, true); if (!IsResponseOK(result)) diff --git a/src/backend/distributed/operations/create_shards.c b/src/backend/distributed/operations/create_shards.c index 96254705122..1553de92f33 100644 --- a/src/backend/distributed/operations/create_shards.c +++ 
b/src/backend/distributed/operations/create_shards.c @@ -200,7 +200,7 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, * each placement insertion. */ uint64 *shardIdPtr; - foreach_ptr(shardIdPtr, insertedShardIds) + foreach_declared_ptr(shardIdPtr, insertedShardIds) { List *placementsForShard = ShardPlacementList(*shardIdPtr); insertedShardPlacements = list_concat(insertedShardPlacements, @@ -258,7 +258,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool char targetShardStorageType = ShardStorageType(targetRelationId); ShardInterval *sourceShardInterval = NULL; - foreach_ptr(sourceShardInterval, sourceShardIntervalList) + foreach_declared_ptr(sourceShardInterval, sourceShardIntervalList) { uint64 sourceShardId = sourceShardInterval->shardId; uint64 *newShardIdPtr = (uint64 *) palloc0(sizeof(uint64)); @@ -286,7 +286,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool shardMinValueText, shardMaxValueText); ShardPlacement *sourcePlacement = NULL; - foreach_ptr(sourcePlacement, sourceShardPlacementList) + foreach_declared_ptr(sourcePlacement, sourceShardPlacementList) { int32 groupId = sourcePlacement->groupId; const uint64 shardSize = 0; @@ -304,7 +304,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool * each placement insertion. */ uint64 *shardIdPtr; - foreach_ptr(shardIdPtr, insertedShardIds) + foreach_declared_ptr(shardIdPtr, insertedShardIds) { List *placementsForShard = ShardPlacementList(*shardIdPtr); insertedShardPlacements = list_concat(insertedShardPlacements, diff --git a/src/backend/distributed/operations/delete_protocol.c b/src/backend/distributed/operations/delete_protocol.c index 39651715853..d73b74720d5 100644 --- a/src/backend/distributed/operations/delete_protocol.c +++ b/src/backend/distributed/operations/delete_protocol.c @@ -250,12 +250,12 @@ DropShards(Oid relationId, char *schemaName, char *relationName, bool shouldExecuteTasksLocally = ShouldExecuteTasksLocally(dropTaskList); Task *task = NULL; - foreach_ptr(task, dropTaskList) + foreach_declared_ptr(task, dropTaskList) { uint64 shardId = task->anchorShardId; ShardPlacement *shardPlacement = NULL; - foreach_ptr(shardPlacement, task->taskPlacementList) + foreach_declared_ptr(shardPlacement, task->taskPlacementList) { uint64 shardPlacementId = shardPlacement->placementId; int32 shardPlacementGroupId = shardPlacement->groupId; @@ -350,7 +350,7 @@ DropTaskList(Oid relationId, char *schemaName, char *relationName, int taskId = 1; ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, deletableShardIntervalList) + foreach_declared_ptr(shardInterval, deletableShardIntervalList) { Assert(shardInterval->relationId == relationId); diff --git a/src/backend/distributed/operations/health_check.c b/src/backend/distributed/operations/health_check.c index c908606c156..e54d80b7e65 100644 --- a/src/backend/distributed/operations/health_check.c +++ b/src/backend/distributed/operations/health_check.c @@ -119,11 +119,11 @@ StoreAllConnectivityChecks(Tuplestorestate *tupleStore, TupleDesc tupleDescripto /* * We iterate over the workerNodeList twice, for source and target worker nodes. This - * operation is safe for foreach_ptr macro, as long as we use different variables for + * operation is safe for foreach_declared_ptr macro, as long as we use different variables for * each iteration. 
*/ WorkerNode *sourceWorkerNode = NULL; - foreach_ptr(sourceWorkerNode, workerNodeList) + foreach_declared_ptr(sourceWorkerNode, workerNodeList) { const char *sourceNodeName = sourceWorkerNode->workerName; const int sourceNodePort = sourceWorkerNode->workerPort; @@ -135,7 +135,7 @@ StoreAllConnectivityChecks(Tuplestorestate *tupleStore, TupleDesc tupleDescripto /* the second iteration over workerNodeList for the target worker nodes. */ WorkerNode *targetWorkerNode = NULL; - foreach_ptr(targetWorkerNode, workerNodeList) + foreach_declared_ptr(targetWorkerNode, workerNodeList) { const char *targetNodeName = targetWorkerNode->workerName; const int targetNodePort = targetWorkerNode->workerPort; diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index 52e44bea01c..8a633e3dce0 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -645,7 +645,7 @@ GetPreLoadTableCreationCommands(Oid relationId, if (tableACLList != NIL) { char *tableACLCommand = NULL; - foreach_ptr(tableACLCommand, tableACLList) + foreach_declared_ptr(tableACLCommand, tableACLList) { tableDDLEventList = lappend(tableDDLEventList, makeTableDDLCommandString(tableACLCommand)); @@ -822,7 +822,7 @@ GetTableRowLevelSecurityCommands(Oid relationId) List *rowLevelSecurityEnableCommands = pg_get_row_level_security_commands(relationId); char *rowLevelSecurityCommand = NULL; - foreach_ptr(rowLevelSecurityCommand, rowLevelSecurityEnableCommands) + foreach_declared_ptr(rowLevelSecurityCommand, rowLevelSecurityEnableCommands) { rowLevelSecurityCommandList = lappend( rowLevelSecurityCommandList, diff --git a/src/backend/distributed/operations/replicate_none_dist_table_shard.c b/src/backend/distributed/operations/replicate_none_dist_table_shard.c index 33a98ee4226..aa48b488a2f 100644 --- a/src/backend/distributed/operations/replicate_none_dist_table_shard.c +++ b/src/backend/distributed/operations/replicate_none_dist_table_shard.c @@ -63,7 +63,7 @@ NoneDistTableReplicateCoordinatorPlacement(Oid noneDistTableId, /* insert new placements to pg_dist_placement */ List *insertedPlacementList = NIL; WorkerNode *targetNode = NULL; - foreach_ptr(targetNode, targetNodeList) + foreach_declared_ptr(targetNode, targetNodeList) { ShardPlacement *shardPlacement = InsertShardPlacementRowGlobally(shardId, GetNextPlacementId(), @@ -215,7 +215,7 @@ CreateForeignKeysFromReferenceTablesOnShards(Oid noneDistTableId) List *taskList = NIL; char *command = NULL; - foreach_ptr(command, ddlCommandList) + foreach_declared_ptr(command, ddlCommandList) { List *commandTaskList = InterShardDDLTaskList( ForeignConstraintGetReferencingTableId(command), diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c index 2efce9a7b09..f76476c15c2 100644 --- a/src/backend/distributed/operations/shard_cleaner.c +++ b/src/backend/distributed/operations/shard_cleaner.c @@ -259,7 +259,7 @@ DropOrphanedResourcesForCleanup() int failedResourceCountForCleanup = 0; CleanupRecord *record = NULL; - foreach_ptr(record, cleanupRecordList) + foreach_declared_ptr(record, cleanupRecordList) { if (!PrimaryNodeForGroup(record->nodeGroupId, NULL)) { @@ -369,7 +369,7 @@ FinalizeOperationNeedingCleanupOnSuccess(const char *operationName) int failedShardCountOnComplete = 0; CleanupRecord *record = NULL; - foreach_ptr(record, currentOperationRecordList) + foreach_declared_ptr(record, currentOperationRecordList) { if 
(record->policy == CLEANUP_ALWAYS) { @@ -932,7 +932,7 @@ TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, int nodePo bool executeCommand = true; const char *commandString = NULL; - foreach_ptr(commandString, commandList) + foreach_declared_ptr(commandString, commandList) { /* * Cannot use SendOptionalCommandListToWorkerOutsideTransactionWithConnection() diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index 03dc4c1b84e..074f1bed00e 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -357,7 +357,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state) Assert(state != NULL); Assert(list_length(state->fillStateListAsc) == list_length(state->fillStateListDesc)); - foreach_ptr(fillState, state->fillStateListAsc) + foreach_declared_ptr(fillState, state->fillStateListAsc) { float4 totalCost = 0; ShardCost *shardCost = NULL; @@ -376,7 +376,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state) fillState); - foreach_ptr(shardCost, fillState->shardCostListDesc) + foreach_declared_ptr(shardCost, fillState->shardCostListDesc) { if (prevShardCost != NULL) { @@ -521,7 +521,7 @@ GetRebalanceSteps(RebalanceOptions *options) List *activeWorkerList = SortedActiveWorkers(); int shardAllowedNodeCount = 0; WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, activeWorkerList) + foreach_declared_ptr(workerNode, activeWorkerList) { if (workerNode->shouldHaveShards) { @@ -540,7 +540,7 @@ GetRebalanceSteps(RebalanceOptions *options) List *unbalancedShards = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, options->relationIdList) + foreach_declared_oid(relationId, options->relationIdList) { List *shardPlacementList = FullShardPlacementList(relationId, options->excludedShardArray); @@ -1336,7 +1336,7 @@ get_rebalance_progress(PG_FUNCTION_ARGS) &segmentList); ProgressMonitorData *monitor = NULL; - foreach_ptr(monitor, rebalanceMonitorList) + foreach_declared_ptr(monitor, rebalanceMonitorList) { PlacementUpdateEventProgress *placementUpdateEvents = ProgressMonitorSteps( monitor); @@ -1847,7 +1847,7 @@ NonColocatedDistRelationIdList(void) HTAB *alreadySelectedColocationIds = hash_create("RebalanceColocationIdSet", capacity, &info, flags); - foreach_oid(tableId, allCitusTablesList) + foreach_declared_oid(tableId, allCitusTablesList) { bool foundInSet = false; CitusTableCacheEntry *citusTableCacheEntry = GetCitusTableCacheEntry( @@ -1913,7 +1913,7 @@ RebalanceTableShards(RebalanceOptions *options, Oid shardReplicationModeOid) * is required for logical replication to replicate UPDATE and DELETE commands. 
*/ PlacementUpdateEvent *placementUpdate = NULL; - foreach_ptr(placementUpdate, placementUpdateList) + foreach_declared_ptr(placementUpdate, placementUpdateList) { Oid relationId = RelationIdForShard(placementUpdate->shardId); List *colocatedTableList = ColocatedTableList(relationId); @@ -1948,7 +1948,7 @@ static void ErrorOnConcurrentRebalance(RebalanceOptions *options) { Oid relationId = InvalidOid; - foreach_oid(relationId, options->relationIdList) + foreach_declared_oid(relationId, options->relationIdList) { /* this provides the legacy error when the lock can't be acquired */ AcquireRebalanceColocationLock(relationId, options->operationName); @@ -2039,7 +2039,7 @@ GenerateTaskMoveDependencyList(PlacementUpdateEvent *move, int64 colocationId, if (found) { int64 *taskId = NULL; - foreach_ptr(taskId, shardMoveSourceNodeHashEntry->taskIds) + foreach_declared_ptr(taskId, shardMoveSourceNodeHashEntry->taskIds) { hash_search(dependsList, taskId, HASH_ENTER, NULL); } @@ -2123,13 +2123,13 @@ RebalanceTableShardsBackground(RebalanceOptions *options, Oid shardReplicationMo const char shardTransferMode = LookupShardTransferMode(shardReplicationModeOid); List *colocatedTableList = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, options->relationIdList) + foreach_declared_oid(relationId, options->relationIdList) { colocatedTableList = list_concat(colocatedTableList, ColocatedTableList(relationId)); } Oid colocatedTableId = InvalidOid; - foreach_oid(colocatedTableId, colocatedTableList) + foreach_declared_oid(colocatedTableId, colocatedTableList) { EnsureTableOwner(colocatedTableId); } @@ -2151,7 +2151,7 @@ RebalanceTableShardsBackground(RebalanceOptions *options, Oid shardReplicationMo * is required for logical replication to replicate UPDATE and DELETE commands. 
*/ PlacementUpdateEvent *placementUpdate = NULL; - foreach_ptr(placementUpdate, placementUpdateList) + foreach_declared_ptr(placementUpdate, placementUpdateList) { relationId = RelationIdForShard(placementUpdate->shardId); List *colocatedTables = ColocatedTableList(relationId); @@ -2204,7 +2204,7 @@ RebalanceTableShardsBackground(RebalanceOptions *options, Oid shardReplicationMo ShardMoveDependencies shardMoveDependencies = InitializeShardMoveDependencies(); - foreach_ptr(move, placementUpdateList) + foreach_declared_ptr(move, placementUpdateList) { resetStringInfo(&buf); @@ -2361,7 +2361,7 @@ ExecuteRebalancerCommandInSeparateTransaction(char *command) List *setCommands = GetSetCommandListForNewConnections(); char *setCommand = NULL; - foreach_ptr(setCommand, setCommands) + foreach_declared_ptr(setCommand, setCommands) { commandList = lappend(commandList, setCommand); } @@ -2429,14 +2429,14 @@ RebalancePlacementUpdates(List *workerNodeList, List *activeShardPlacementListLi List *shardPlacementList = NIL; List *placementUpdateList = NIL; - foreach_ptr(shardPlacementList, activeShardPlacementListList) + foreach_declared_ptr(shardPlacementList, activeShardPlacementListList) { state = InitRebalanceState(workerNodeList, shardPlacementList, functions); rebalanceStates = lappend(rebalanceStates, state); } - foreach_ptr(state, rebalanceStates) + foreach_declared_ptr(state, rebalanceStates) { state->placementUpdateList = placementUpdateList; MoveShardsAwayFromDisallowedNodes(state); @@ -2445,7 +2445,7 @@ RebalancePlacementUpdates(List *workerNodeList, List *activeShardPlacementListLi if (!drainOnly) { - foreach_ptr(state, rebalanceStates) + foreach_declared_ptr(state, rebalanceStates) { state->placementUpdateList = placementUpdateList; @@ -2477,13 +2477,13 @@ RebalancePlacementUpdates(List *workerNodeList, List *activeShardPlacementListLi } } - foreach_ptr(state, rebalanceStates) + foreach_declared_ptr(state, rebalanceStates) { hash_destroy(state->placementsHash); } int64 ignoredMoves = 0; - foreach_ptr(state, rebalanceStates) + foreach_declared_ptr(state, rebalanceStates) { ignoredMoves += state->ignoredMoves; } @@ -2538,7 +2538,7 @@ InitRebalanceState(List *workerNodeList, List *shardPlacementList, state->placementsHash = ShardPlacementsListToHash(shardPlacementList); /* create empty fill state for all of the worker nodes */ - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { NodeFillState *fillState = palloc0(sizeof(NodeFillState)); fillState->node = workerNode; @@ -2621,7 +2621,7 @@ FindFillStateForPlacement(RebalanceState *state, ShardPlacement *placement) NodeFillState *fillState = NULL; /* Find the correct fill state to add the placement to and do that */ - foreach_ptr(fillState, state->fillStateListAsc) + foreach_declared_ptr(fillState, state->fillStateListAsc) { if (IsPlacementOnWorkerNode(placement, fillState->node)) { @@ -2733,7 +2733,7 @@ MoveShardsAwayFromDisallowedNodes(RebalanceState *state) CompareDisallowedPlacementDesc); /* Move shards off of nodes they are not allowed on */ - foreach_ptr(disallowedPlacement, state->disallowedPlacementList) + foreach_declared_ptr(disallowedPlacement, state->disallowedPlacementList) { NodeFillState *targetFillState = FindAllowedTargetFillState( state, disallowedPlacement->shardCost->shardId); @@ -2788,7 +2788,7 @@ static NodeFillState * FindAllowedTargetFillState(RebalanceState *state, uint64 shardId) { NodeFillState *targetFillState = NULL; - foreach_ptr(targetFillState, state->fillStateListAsc) + 
foreach_declared_ptr(targetFillState, state->fillStateListAsc) { bool hasShard = PlacementsHashFind( state->placementsHash, @@ -2914,7 +2914,7 @@ FindAndMoveShardCost(float4 utilizationLowerBound, * find a source node for the move, starting at the node with the highest * utilization */ - foreach_ptr(sourceFillState, state->fillStateListDesc) + foreach_declared_ptr(sourceFillState, state->fillStateListDesc) { /* Don't move shards away from nodes that are already too empty, we're * done searching */ @@ -2925,7 +2925,7 @@ FindAndMoveShardCost(float4 utilizationLowerBound, /* find a target node for the move, starting at the node with the * lowest utilization */ - foreach_ptr(targetFillState, state->fillStateListAsc) + foreach_declared_ptr(targetFillState, state->fillStateListAsc) { ShardCost *shardCost = NULL; @@ -2948,7 +2948,7 @@ FindAndMoveShardCost(float4 utilizationLowerBound, /* find a shardcost that can be moved between between nodes that * makes the cost distribution more equal */ - foreach_ptr(shardCost, sourceFillState->shardCostListDesc) + foreach_declared_ptr(shardCost, sourceFillState->shardCostListDesc) { bool targetHasShard = PlacementsHashFind(state->placementsHash, shardCost->shardId, diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c index 4baf0fb241b..b1202e648c8 100644 --- a/src/backend/distributed/operations/shard_split.c +++ b/src/backend/distributed/operations/shard_split.c @@ -302,7 +302,7 @@ ErrorIfCannotSplitShardExtended(SplitOperation splitOperation, NullableDatum lastShardSplitPoint = { 0, true /*isnull*/ }; Datum shardSplitPoint; - foreach_int(shardSplitPoint, shardSplitPointsList) + foreach_declared_int(shardSplitPoint, shardSplitPointsList) { int32 shardSplitPointValue = DatumGetInt32(shardSplitPoint); @@ -399,7 +399,7 @@ GetWorkerNodesFromWorkerIds(List *nodeIdsForPlacementList) { List *workersForPlacementList = NIL; int32 nodeId; - foreach_int(nodeId, nodeIdsForPlacementList) + foreach_declared_int(nodeId, nodeIdsForPlacementList) { uint32 nodeIdValue = (uint32) nodeId; WorkerNode *workerNode = LookupNodeByNodeId(nodeIdValue); @@ -464,7 +464,7 @@ SplitShard(SplitMode splitMode, /* sort the tables to avoid deadlocks */ colocatedTableList = SortList(colocatedTableList, CompareOids); Oid colocatedTableId = InvalidOid; - foreach_oid(colocatedTableId, colocatedTableList) + foreach_declared_oid(colocatedTableId, colocatedTableList) { /* * Block concurrent DDL / TRUNCATE commands on the relation. Similarly, @@ -694,7 +694,7 @@ CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList, * Iterate over all the shards in the shard group. */ List *shardIntervalList = NIL; - foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + foreach_declared_ptr(shardIntervalList, shardGroupSplitIntervalListList) { ShardInterval *shardInterval = NULL; WorkerNode *workerPlacementNode = NULL; @@ -778,7 +778,7 @@ CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitIntervalListList, /* * Iterate over all the shards in the shard group. 
*/ - foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + foreach_declared_ptr(shardIntervalList, shardGroupSplitIntervalListList) { ShardInterval *shardInterval = NULL; WorkerNode *workerPlacementNode = NULL; @@ -1029,7 +1029,7 @@ CreateSplitIntervalsForShardGroup(List *sourceColocatedShardIntervalList, List *shardGroupSplitIntervalListList = NIL; ShardInterval *shardToSplitInterval = NULL; - foreach_ptr(shardToSplitInterval, sourceColocatedShardIntervalList) + foreach_declared_ptr(shardToSplitInterval, sourceColocatedShardIntervalList) { List *shardSplitIntervalList = NIL; CreateSplitIntervalsForShard(shardToSplitInterval, splitPointsForShard, @@ -1121,7 +1121,7 @@ UpdateDistributionColumnsForShardGroup(List *colocatedShardList, uint32 colocationId) { ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, colocatedShardList) + foreach_declared_ptr(shardInterval, colocatedShardList) { Oid relationId = shardInterval->relationId; Var *distributionColumn = GetDistributionColumnFromMap(distributionColumnMap, @@ -1162,7 +1162,7 @@ InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList, /* * Iterate over all the shards in the shard group. */ - foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + foreach_declared_ptr(shardIntervalList, shardGroupSplitIntervalListList) { /* * Iterate on split shards list for a given shard and insert metadata. @@ -1195,7 +1195,7 @@ InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList, /* send commands to synced nodes one by one */ List *splitOffShardMetadataCommandList = ShardListInsertCommand(syncedShardList); char *command = NULL; - foreach_ptr(command, splitOffShardMetadataCommandList) + foreach_declared_ptr(command, splitOffShardMetadataCommandList) { SendCommandToWorkersWithMetadata(command); } @@ -1216,7 +1216,7 @@ CreatePartitioningHierarchyForBlockingSplit(List *shardGroupSplitIntervalListLis /* * Iterate over all the shards in the shard group. */ - foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + foreach_declared_ptr(shardIntervalList, shardGroupSplitIntervalListList) { ShardInterval *shardInterval = NULL; WorkerNode *workerPlacementNode = NULL; @@ -1255,7 +1255,7 @@ CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList, /* * Iterate over all the shards in the shard group. */ - foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + foreach_declared_ptr(shardIntervalList, shardGroupSplitIntervalListList) { ShardInterval *shardInterval = NULL; WorkerNode *workerPlacementNode = NULL; @@ -1281,7 +1281,7 @@ CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList, referenceTableForeignConstraintList); char *constraintCommand = NULL; - foreach_ptr(constraintCommand, constraintCommandList) + foreach_declared_ptr(constraintCommand, constraintCommandList) { SendCommandToWorker( workerPlacementNode->workerName, @@ -1685,7 +1685,7 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, } ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, sourceColocatedShardIntervalList) + foreach_declared_ptr(shardInterval, sourceColocatedShardIntervalList) { /* Populate list of commands necessary to create shard interval on destination */ List *splitShardCreationCommandList = GetPreLoadTableCreationCommands( @@ -1739,7 +1739,7 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, * If the target shard was created on source node as placement, skip it (See Note 2 from function description). 
*/ List *shardIntervalList = NULL; - foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + foreach_declared_ptr(shardIntervalList, shardGroupSplitIntervalListList) { ShardInterval *shardInterval = NULL; workerPlacementNode = NULL; @@ -1818,7 +1818,7 @@ CreateWorkerForPlacementSet(List *workersForPlacementList) hashFlags); WorkerNode *workerForPlacement = NULL; - foreach_ptr(workerForPlacement, workersForPlacementList) + foreach_declared_ptr(workerForPlacement, workersForPlacementList) { void *hashKey = (void *) workerForPlacement; hash_search(workerForPlacementSet, hashKey, HASH_ENTER, NULL); diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c index 737086752d6..b7d07b2cfce 100644 --- a/src/backend/distributed/operations/shard_transfer.c +++ b/src/backend/distributed/operations/shard_transfer.c @@ -503,7 +503,7 @@ TransferShards(int64 shardId, char *sourceNodeName, DropOrphanedResourcesInSeparateTransaction(); ShardInterval *colocatedShard = NULL; - foreach_ptr(colocatedShard, colocatedShardList) + foreach_declared_ptr(colocatedShard, colocatedShardList) { /* * This is to prevent any race condition possibility among the shard moves. @@ -530,7 +530,7 @@ TransferShards(int64 shardId, char *sourceNodeName, * metadata workers. */ colocatedShard = NULL; - foreach_ptr(colocatedShard, colocatedShardList) + foreach_declared_ptr(colocatedShard, colocatedShardList) { uint64 colocatedShardId = colocatedShard->shardId; uint32 groupId = GroupForNode(targetNodeName, targetNodePort); @@ -623,7 +623,7 @@ InsertCleanupRecordsForShardPlacementsOnNode(List *shardIntervalList, int32 groupId) { ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { /* get shard name */ char *qualifiedShardName = ConstructQualifiedShardName(shardInterval); @@ -659,7 +659,7 @@ IsShardListOnNode(List *colocatedShardList, char *targetNodeName, uint32 targetN * We exhaustively search all co-located shards */ ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, colocatedShardList) + foreach_declared_ptr(shardInterval, colocatedShardList) { uint64 shardId = shardInterval->shardId; List *placementList = ActiveShardPlacementListOnGroup(shardId, @@ -682,7 +682,7 @@ static void LockColocatedRelationsForMove(List *colocatedTableList) { Oid colocatedTableId = InvalidOid; - foreach_oid(colocatedTableId, colocatedTableList) + foreach_declared_oid(colocatedTableId, colocatedTableList) { LockRelationOid(colocatedTableId, ShareUpdateExclusiveLock); } @@ -698,7 +698,7 @@ ErrorIfForeignTableForShardTransfer(List *colocatedTableList, ShardTransferType transferType) { Oid colocatedTableId = InvalidOid; - foreach_oid(colocatedTableId, colocatedTableList) + foreach_declared_oid(colocatedTableId, colocatedTableList) { if (IsForeignTable(colocatedTableId)) { @@ -724,7 +724,7 @@ EnsureAllShardsCanBeCopied(List *colocatedShardList, char *targetNodeName, uint32 targetNodePort) { ShardInterval *colocatedShard = NULL; - foreach_ptr(colocatedShard, colocatedShardList) + foreach_declared_ptr(colocatedShard, colocatedShardList) { uint64 colocatedShardId = colocatedShard->shardId; @@ -1124,7 +1124,7 @@ void BlockWritesToShardList(List *shardList) { ShardInterval *shard = NULL; - foreach_ptr(shard, shardList) + foreach_declared_ptr(shard, shardList) { /* * We need to lock the referenced reference table metadata to avoid @@ -1295,7 +1295,7 @@ static void EnsureTableListOwner(List 
*tableIdList) { Oid tableId = InvalidOid; - foreach_oid(tableId, tableIdList) + foreach_declared_oid(tableId, tableIdList) { EnsureTableOwner(tableId); } @@ -1310,7 +1310,7 @@ static void ErrorIfReplicatingDistributedTableWithFKeys(List *tableIdList) { Oid tableId = InvalidOid; - foreach_oid(tableId, tableIdList) + foreach_declared_oid(tableId, tableIdList) { List *foreignConstraintCommandList = GetReferencingForeignConstaintCommands(tableId); @@ -1381,7 +1381,7 @@ CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeNa * target node. We do not create the indexes yet. */ ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { Oid relationId = shardInterval->relationId; uint64 shardId = shardInterval->shardId; @@ -1449,7 +1449,7 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, /* iterate through the colocated shards and copy each */ ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { /* * For each shard we first create the shard table in a separate @@ -1492,7 +1492,7 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, sourceNodePort, PLACEMENT_UPDATE_STATUS_CREATING_CONSTRAINTS); - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { List *ddlCommandList = PostLoadShardCreationCommandList(shardInterval, sourceNodeName, @@ -1509,7 +1509,7 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, * Create DDL commands to Attach child tables to their parents in a partitioning hierarchy. */ List *shardIntervalWithDDCommandsList = NIL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { if (PartitionTable(shardInterval->relationId)) { @@ -1534,7 +1534,7 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, * Iterate through the colocated shards and create DDL commamnds * to create the foreign constraints. */ - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { List *shardForeignConstraintCommandList = NIL; List *referenceTableForeignConstraintList = NIL; @@ -1553,7 +1553,7 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, /* Now execute the Partitioning & Foreign constraints creation commads. 
*/ ShardCommandList *shardCommandList = NULL; - foreach_ptr(shardCommandList, shardIntervalWithDDCommandsList) + foreach_declared_ptr(shardCommandList, shardIntervalWithDDCommandsList) { char *tableOwner = TableOwner(shardCommandList->shardInterval->relationId); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, @@ -1583,7 +1583,7 @@ CopyShardsToNode(WorkerNode *sourceNode, WorkerNode *targetNode, List *shardInte int taskId = 0; List *copyTaskList = NIL; ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { /* * Skip copying data for partitioned tables, because they contain no @@ -1716,7 +1716,7 @@ SearchShardPlacementInList(List *shardPlacementList, const char *nodeName, uint32 nodePort) { ShardPlacement *shardPlacement = NULL; - foreach_ptr(shardPlacement, shardPlacementList) + foreach_declared_ptr(shardPlacement, shardPlacementList) { if (strncmp(nodeName, shardPlacement->nodeName, MAX_NODE_LENGTH) == 0 && nodePort == shardPlacement->nodePort) @@ -1837,7 +1837,7 @@ CopyShardForeignConstraintCommandListGrouped(ShardInterval *shardInterval, *referenceTableForeignConstraintList = NIL; const char *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { char *escapedCommand = quote_literal_cstr(command); @@ -2002,7 +2002,7 @@ DropShardPlacementsFromMetadata(List *shardList, char *nodeName, int32 nodePort) { ShardInterval *shardInverval = NULL; - foreach_ptr(shardInverval, shardList) + foreach_declared_ptr(shardInverval, shardList) { uint64 shardId = shardInverval->shardId; List *shardPlacementList = ShardPlacementList(shardId); @@ -2066,7 +2066,7 @@ WorkerApplyShardDDLCommandList(List *ddlCommandList, int64 shardId) List *applyDDLCommandList = NIL; TableDDLCommand *ddlCommand = NULL; - foreach_ptr(ddlCommand, ddlCommandList) + foreach_declared_ptr(ddlCommand, ddlCommandList) { Assert(CitusIsA(ddlCommand, TableDDLCommand)); char *applyDDLCommand = GetShardedTableDDLCommand(ddlCommand, shardId, NULL); @@ -2100,7 +2100,7 @@ UpdatePlacementUpdateStatusForShardIntervalList(List *shardIntervalList, } ProgressMonitorData *monitor = NULL; - foreach_ptr(monitor, rebalanceMonitorList) + foreach_declared_ptr(monitor, rebalanceMonitorList) { PlacementUpdateEventProgress *steps = ProgressMonitorSteps(monitor); @@ -2111,7 +2111,7 @@ UpdatePlacementUpdateStatusForShardIntervalList(List *shardIntervalList, bool foundInList = false; ShardInterval *candidateShard = NULL; - foreach_ptr(candidateShard, shardIntervalList) + foreach_declared_ptr(candidateShard, shardIntervalList) { if (candidateShard->shardId == currentShardId) { diff --git a/src/backend/distributed/operations/stage_protocol.c b/src/backend/distributed/operations/stage_protocol.c index 5770d648e21..9881d87752a 100644 --- a/src/backend/distributed/operations/stage_protocol.c +++ b/src/backend/distributed/operations/stage_protocol.c @@ -431,7 +431,7 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements, int poolSize = 1; ShardPlacement *shardPlacement = NULL; - foreach_ptr(shardPlacement, shardPlacements) + foreach_declared_ptr(shardPlacement, shardPlacements) { uint64 shardId = shardPlacement->shardId; ShardInterval *shardInterval = LoadShardInterval(shardId); @@ -516,7 +516,7 @@ RelationShardListForShardCreate(ShardInterval *shardInterval) /* all foregin key constraint relations */ Oid fkeyRelationid = InvalidOid; - foreach_oid(fkeyRelationid, allForeignKeyRelations) + 
foreach_declared_oid(fkeyRelationid, allForeignKeyRelations) { uint64 fkeyShardId = INVALID_SHARD_ID; @@ -590,7 +590,7 @@ WorkerCreateShardCommandList(Oid relationId, uint64 shardId, char *schemaName = get_namespace_name(schemaId); TableDDLCommand *ddlCommand = NULL; - foreach_ptr(ddlCommand, ddlCommandList) + foreach_declared_ptr(ddlCommand, ddlCommandList) { Assert(CitusIsA(ddlCommand, TableDDLCommand)); char *applyDDLCommand = GetShardedTableDDLCommand(ddlCommand, shardId, @@ -645,7 +645,7 @@ UpdateShardStatistics(int64 shardId) /* get shard's statistics from a shard placement */ ShardPlacement *placement = NULL; - foreach_ptr(placement, shardPlacementList) + foreach_declared_ptr(placement, shardPlacementList) { statsOK = WorkerShardStats(placement, relationId, shardQualifiedName, &shardSize); @@ -713,7 +713,7 @@ ReceiveAndUpdateShardsSizes(List *connectionList) "oid visited hash set"); MultiConnection *connection = NULL; - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { if (PQstatus(connection->pgConn) != CONNECTION_OK) { @@ -809,7 +809,7 @@ UpdateShardSize(uint64 shardId, ShardInterval *shardInterval, Oid relationId, ShardPlacement *placement = NULL; /* update metadata for each shard placement */ - foreach_ptr(placement, shardPlacementList) + foreach_declared_ptr(placement, shardPlacementList) { uint64 placementId = placement->placementId; int32 groupId = placement->groupId; diff --git a/src/backend/distributed/operations/worker_node_manager.c b/src/backend/distributed/operations/worker_node_manager.c index ba622e4d7ab..8a4245ca0a0 100644 --- a/src/backend/distributed/operations/worker_node_manager.c +++ b/src/backend/distributed/operations/worker_node_manager.c @@ -421,7 +421,7 @@ GetFirstPrimaryWorkerNode(void) List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(RowShareLock); WorkerNode *firstWorkerNode = NULL; WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { if (firstWorkerNode == NULL || CompareWorkerNodes(&workerNode, &firstWorkerNode) < 0) diff --git a/src/backend/distributed/operations/worker_split_copy_udf.c b/src/backend/distributed/operations/worker_split_copy_udf.c index 03354ea047b..eb97dab1a0f 100644 --- a/src/backend/distributed/operations/worker_split_copy_udf.c +++ b/src/backend/distributed/operations/worker_split_copy_udf.c @@ -146,7 +146,7 @@ TraceWorkerSplitCopyUdf(char *sourceShardToCopySchemaName, int index = 1; int splitWayCount = list_length(splitCopyInfoList); SplitCopyInfo *splitCopyInfo = NULL; - foreach_ptr(splitCopyInfo, splitCopyInfoList) + foreach_declared_ptr(splitCopyInfo, splitCopyInfoList) { char *shardNameCopy = pstrdup(sourceShardToCopyPrefix); AppendShardIdToName(&shardNameCopy, splitCopyInfo->destinationShardId); @@ -236,7 +236,7 @@ BuildMinMaxRangeArrays(List *splitCopyInfoList, ArrayType **minValueArray, SplitCopyInfo *splitCopyInfo = NULL; int index = 0; - foreach_ptr(splitCopyInfo, splitCopyInfoList) + foreach_declared_ptr(splitCopyInfo, splitCopyInfoList) { minValues[index] = splitCopyInfo->destinationShardMinHashValue; maxValues[index] = splitCopyInfo->destinationShardMaxHashValue; @@ -269,7 +269,7 @@ CreateShardCopyDestReceivers(EState *estate, ShardInterval *shardIntervalToSplit SplitCopyInfo *splitCopyInfo = NULL; int index = 0; char *sourceShardNamePrefix = get_rel_name(shardIntervalToSplitCopy->relationId); - foreach_ptr(splitCopyInfo, splitCopyInfoList) + foreach_declared_ptr(splitCopyInfo, 
splitCopyInfoList) { Oid destinationShardSchemaOid = get_rel_namespace( shardIntervalToSplitCopy->relationId); diff --git a/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c b/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c index d4775995c17..c65893fbc91 100644 --- a/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c +++ b/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c @@ -298,7 +298,7 @@ PopulateShardSplitInfoInSM(ShardSplitInfoSMHeader *shardSplitInfoSMHeader, List *shardSplitInfoList = entry->shardSplitInfoList; ShardSplitInfo *splitShardInfo = NULL; - foreach_ptr(splitShardInfo, shardSplitInfoList) + foreach_declared_ptr(splitShardInfo, shardSplitInfoList) { shardSplitInfoSMHeader->splitInfoArray[splitInfoIndex] = *splitShardInfo; strcpy_s(shardSplitInfoSMHeader->splitInfoArray[splitInfoIndex].slotName, diff --git a/src/backend/distributed/planner/combine_query_planner.c b/src/backend/distributed/planner/combine_query_planner.c index e3aa7b3e637..f81ade91c93 100644 --- a/src/backend/distributed/planner/combine_query_planner.c +++ b/src/backend/distributed/planner/combine_query_planner.c @@ -217,7 +217,7 @@ CitusCustomScanPathPlan(PlannerInfo *root, { TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, citusPath->remoteScan->custom_scan_tlist) + foreach_declared_ptr(targetEntry, citusPath->remoteScan->custom_scan_tlist) { /* we created this list, so we know it only contains Var */ Assert(IsA(targetEntry->expr, Var)); @@ -231,7 +231,7 @@ CitusCustomScanPathPlan(PlannerInfo *root, /* clauses might have been added by the planner, need to add them to our scan */ RestrictInfo *restrictInfo = NULL; List **quals = &citusPath->remoteScan->scan.plan.qual; - foreach_ptr(restrictInfo, clauses) + foreach_declared_ptr(restrictInfo, clauses) { *quals = lappend(*quals, restrictInfo->clause); } @@ -273,7 +273,7 @@ BuildSelectStatementViaStdPlanner(Query *combineQuery, List *remoteScanTargetLis /* extract column names from the remoteScanTargetList */ List *columnNameList = NIL; TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, remoteScanTargetList) + foreach_declared_ptr(targetEntry, remoteScanTargetList) { columnNameList = lappend(columnNameList, makeString(targetEntry->resname)); } diff --git a/src/backend/distributed/planner/deparse_shard_query.c b/src/backend/distributed/planner/deparse_shard_query.c index 43b5f14933a..6b8ad3fdeb0 100644 --- a/src/backend/distributed/planner/deparse_shard_query.c +++ b/src/backend/distributed/planner/deparse_shard_query.c @@ -67,7 +67,7 @@ RebuildQueryStrings(Job *workerJob) AddInsertAliasIfNeeded(originalQuery); } - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { Query *query = originalQuery; @@ -298,7 +298,7 @@ FindRelationShard(Oid inputRelationId, List *relationShardList) * some, otherwise this query wouldn't be eligible as a router query. * FIXME: We should probably use a hashtable here, to do efficient lookup. 
*/ - foreach_ptr(relationShard, relationShardList) + foreach_declared_ptr(relationShard, relationShardList) { if (inputRelationId == relationShard->relationId) { diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 1d6550afdb5..06f556c88a9 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -1547,7 +1547,7 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan) /* extract the column names from the final targetlist*/ TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, customScan->scan.plan.targetlist) + foreach_declared_ptr(targetEntry, customScan->scan.plan.targetlist) { String *columnName = makeString(targetEntry->resname); columnNameList = lappend(columnNameList, columnName); @@ -1588,7 +1588,7 @@ makeCustomScanTargetlistFromExistingTargetList(List *existingTargetlist) /* build a targetlist to read from the custom scan output */ TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, existingTargetlist) + foreach_declared_ptr(targetEntry, existingTargetlist) { Assert(IsA(targetEntry, TargetEntry)); @@ -1638,7 +1638,7 @@ makeTargetListFromCustomScanList(List *custom_scan_tlist) List *targetList = NIL; TargetEntry *targetEntry = NULL; int resno = 1; - foreach_ptr(targetEntry, custom_scan_tlist) + foreach_declared_ptr(targetEntry, custom_scan_tlist) { /* * INDEX_VAR is used to reference back to the TargetEntry in custom_scan_tlist by @@ -2107,7 +2107,7 @@ TranslatedVars(PlannerInfo *root, int relationIndex) { /* postgres deletes translated_vars, hence we deep copy them here */ Node *targetNode = NULL; - foreach_ptr(targetNode, targetAppendRelInfo->translated_vars) + foreach_declared_ptr(targetNode, targetAppendRelInfo->translated_vars) { translatedVars = lappend(translatedVars, copyObject(targetNode)); @@ -2128,7 +2128,7 @@ FindTargetAppendRelInfo(PlannerInfo *root, int relationRteIndex) AppendRelInfo *appendRelInfo = NULL; /* iterate on the queries that are part of UNION ALL subselects */ - foreach_ptr(appendRelInfo, root->append_rel_list) + foreach_declared_ptr(appendRelInfo, root->append_rel_list) { /* * We're only interested in the child rel that is equal to the @@ -2451,7 +2451,7 @@ TranslatedVarsForRteIdentity(int rteIdentity) currentPlannerRestrictionContext->relationRestrictionContext-> relationRestrictionList; RelationRestriction *relationRestriction = NULL; - foreach_ptr(relationRestriction, relationRestrictionList) + foreach_declared_ptr(relationRestriction, relationRestrictionList) { if (GetRTEIdentity(relationRestriction->rte) == rteIdentity) { @@ -2621,7 +2621,7 @@ GetRTEListProperties(List *rangeTableList) RTEListProperties *rteListProperties = palloc0(sizeof(RTEListProperties)); RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, rangeTableList) + foreach_declared_ptr(rangeTableEntry, rangeTableList) { if (rangeTableEntry->rtekind != RTE_RELATION) { @@ -2714,7 +2714,7 @@ WarnIfListHasForeignDistributedTable(List *rangeTableList) static bool DistributedForeignTableWarningPrompted = false; RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, rangeTableList) + foreach_declared_ptr(rangeTableEntry, rangeTableList) { if (DistributedForeignTableWarningPrompted) { diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 60d6ce466ca..178ea235d80 100644 --- 
a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -566,7 +566,7 @@ CreateCombineQueryForRouterPlan(DistributedPlan *distPlan) List *funcCollations = NIL; TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, dependentTargetList) + foreach_declared_ptr(targetEntry, dependentTargetList) { Node *expr = (Node *) targetEntry->expr; @@ -640,7 +640,7 @@ CreateTargetListForCombineQuery(List *targetList) /* iterate over original target entries */ TargetEntry *originalTargetEntry = NULL; - foreach_ptr(originalTargetEntry, targetList) + foreach_declared_ptr(originalTargetEntry, targetList) { TargetEntry *newTargetEntry = flatCopyTargetEntry(originalTargetEntry); @@ -1571,7 +1571,7 @@ WrapSubquery(Query *subquery) /* create a target list that matches the SELECT */ TargetEntry *selectTargetEntry = NULL; - foreach_ptr(selectTargetEntry, subquery->targetList) + foreach_declared_ptr(selectTargetEntry, subquery->targetList) { /* exactly 1 entry in FROM */ int indexInRangeTable = 1; @@ -1723,7 +1723,7 @@ AddInsertSelectCasts(List *insertTargetList, List *selectTargetList, selectTargetList = list_concat(projectedEntries, nonProjectedEntries); int entryResNo = 1; TargetEntry *selectTargetEntry = NULL; - foreach_ptr(selectTargetEntry, selectTargetList) + foreach_declared_ptr(selectTargetEntry, selectTargetList) { selectTargetEntry->resno = entryResNo++; } diff --git a/src/backend/distributed/planner/intermediate_result_pruning.c b/src/backend/distributed/planner/intermediate_result_pruning.c index 5c9ee6c4331..ab41faffb72 100644 --- a/src/backend/distributed/planner/intermediate_result_pruning.c +++ b/src/backend/distributed/planner/intermediate_result_pruning.c @@ -276,7 +276,7 @@ AppendAllWorkerNodes(IntermediateResultsHashEntry *entry) List *workerNodeList = ActiveReadableNodeList(); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { entry->nodeIdList = list_append_unique_int(entry->nodeIdList, workerNode->nodeId); @@ -420,7 +420,7 @@ LogIntermediateResultMulticastSummary(IntermediateResultsHashEntry *entry, } WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { elog(logLevel, "Subplan %s will be sent to %s:%d", resultId, workerNode->workerName, workerNode->workerPort); diff --git a/src/backend/distributed/planner/local_distributed_join_planner.c b/src/backend/distributed/planner/local_distributed_join_planner.c index a6502bf43c4..2760377bb9f 100644 --- a/src/backend/distributed/planner/local_distributed_join_planner.c +++ b/src/backend/distributed/planner/local_distributed_join_planner.c @@ -328,7 +328,7 @@ static void ConvertRTEsToSubquery(List *rangeTableEntryDetailsList, RecursivePlanningContext *context) { RangeTableEntryDetails *rangeTableEntryDetails = NULL; - foreach_ptr(rangeTableEntryDetails, rangeTableEntryDetailsList) + foreach_declared_ptr(rangeTableEntryDetails, rangeTableEntryDetailsList) { RangeTblEntry *rangeTableEntry = rangeTableEntryDetails->rangeTableEntry; List *requiredAttributeNumbers = rangeTableEntryDetails->requiredAttributeNumbers; @@ -351,7 +351,7 @@ static bool AllRangeTableEntriesHaveUniqueIndex(List *rangeTableEntryDetailsList) { RangeTableEntryDetails *rangeTableEntryDetails = NULL; - foreach_ptr(rangeTableEntryDetails, rangeTableEntryDetailsList) + foreach_declared_ptr(rangeTableEntryDetails, rangeTableEntryDetailsList) { if 
(!rangeTableEntryDetails->hasConstantFilterOnUniqueColumn) { @@ -420,7 +420,7 @@ HasConstantFilterOnUniqueColumn(RangeTblEntry *rangeTableEntry, AppendUniqueIndexColumnsToList, INCLUDE_INDEX_ALL_STATEMENTS); IndexColumns *indexColumns = NULL; - foreach_ptr(indexColumns, uniqueIndexColumnsList) + foreach_declared_ptr(indexColumns, uniqueIndexColumnsList) { List *uniqueIndexColumnNos = indexColumns->indexColumnNos; if (FirstIsSuperSetOfSecond(rteEqualityColumnsNos, @@ -441,7 +441,7 @@ static bool FirstIsSuperSetOfSecond(List *firstIntList, List *secondIntList) { int curInt = 0; - foreach_int(curInt, secondIntList) + foreach_declared_int(curInt, secondIntList) { if (!list_member_int(firstIntList, curInt)) { @@ -526,7 +526,7 @@ RequiredAttrNumbersForRelationInternal(Query *queryToProcess, int rteIndex) List *requiredAttrNumbers = NIL; Var *var = NULL; - foreach_ptr(var, allVarsInQuery) + foreach_declared_ptr(var, allVarsInQuery) { if (var->varno == rteIndex) { @@ -554,7 +554,7 @@ CreateConversionCandidates(PlannerRestrictionContext *plannerRestrictionContext, RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, rangeTableList) + foreach_declared_ptr(rangeTableEntry, rangeTableList) { /* we're only interested in tables */ if (!IsRecursivelyPlannableRelation(rangeTableEntry)) diff --git a/src/backend/distributed/planner/local_plan_cache.c b/src/backend/distributed/planner/local_plan_cache.c index 2e5ca4e550a..443297df04a 100644 --- a/src/backend/distributed/planner/local_plan_cache.c +++ b/src/backend/distributed/planner/local_plan_cache.c @@ -244,7 +244,7 @@ GetCachedLocalPlan(Task *task, DistributedPlan *distributedPlan) int32 localGroupId = GetLocalGroupId(); - foreach_ptr(localPlannedStatement, cachedPlanList) + foreach_declared_ptr(localPlannedStatement, cachedPlanList) { if (localPlannedStatement->shardId == task->anchorShardId && localPlannedStatement->localGroupId == localGroupId) diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index 1f9d17c43fe..8048002e005 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -375,7 +375,7 @@ static void ErrorIfMergeHasUnsupportedTables(Oid targetRelationId, List *rangeTableList) { RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, rangeTableList) + foreach_declared_ptr(rangeTableEntry, rangeTableList) { Oid relationId = rangeTableEntry->relid; @@ -734,7 +734,7 @@ ErrorIfRepartitionMergeNotSupported(Oid targetRelationId, Query *mergeQuery, } MergeAction *action = NULL; - foreach_ptr(action, mergeQuery->mergeActionList) + foreach_declared_ptr(action, mergeQuery->mergeActionList) { if (FindNodeMatchingCheckFunction((Node *) action, IsNodeSubquery)) { @@ -763,7 +763,7 @@ ConvertCteRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte) * Presently, CTEs are only permitted within the USING clause, and thus, * we search for the corresponding one */ - foreach_ptr(candidateCte, mergeQuery->cteList) + foreach_declared_ptr(candidateCte, mergeQuery->cteList) { if (strcmp(candidateCte->ctename, sourceRte->ctename) == 0) { @@ -1018,7 +1018,7 @@ DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList, List *localTablesList = NIL; RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, rangeTableList) + foreach_declared_ptr(rangeTableEntry, rangeTableList) { Oid relationId = rangeTableEntry->relid; @@ -1224,7 +1224,7 @@ ErrorIfMergeQueryQualAndTargetListNotSupported(Oid 
targetRelationId, Query *orig * within itself. Check each INSERT/UPDATE/DELETE individually. */ MergeAction *action = NULL; - foreach_ptr(action, originalQuery->mergeActionList) + foreach_declared_ptr(action, originalQuery->mergeActionList) { Assert(originalQuery->returningList == NULL); deferredError = MergeQualAndTargetListFunctionsSupported(targetRelationId, @@ -1472,10 +1472,10 @@ FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query) bool foundDistributionColumn = false; MergeAction *action = NULL; uint32 targetRangeTableIndex = query->resultRelation; - foreach_ptr(action, query->mergeActionList) + foreach_declared_ptr(action, query->mergeActionList) { /* Skip MATCHED clause as INSERTS are not allowed in it */ - if (action->matched) + if (matched_compat(action)) { continue; } @@ -1502,7 +1502,7 @@ FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query) PartitionColumn(targetRelationId, targetRangeTableIndex); TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, action->targetList) + foreach_declared_ptr(targetEntry, action->targetList) { AttrNumber originalAttrNo = targetEntry->resno; diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index 4584e774024..531b9b36257 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -373,6 +373,21 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es) BufferUsage bufusage_start, bufusage; +#if PG_VERSION_NUM >= PG_VERSION_17 + MemoryContextCounters mem_counters; + MemoryContext planner_ctx = NULL; + MemoryContext saved_ctx = NULL; + + if (es->memory) + { + /* copy paste from postgres code */ + planner_ctx = AllocSetContextCreate(CurrentMemoryContext, + "explain analyze planner context", + ALLOCSET_DEFAULT_SIZES); + saved_ctx = MemoryContextSwitchTo(planner_ctx); + } +#endif + if (es->buffers) { bufusage_start = pgBufferUsage; @@ -430,8 +445,20 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es) ExplainOpenGroup("PlannedStmt", "PlannedStmt", false, es); +#if PG_VERSION_NUM >= PG_VERSION_17 + if (es->memory) + { + MemoryContextSwitchTo(saved_ctx); + MemoryContextMemConsumed(planner_ctx, &mem_counters); + } + + ExplainOnePlan(plan, into, es, queryString, params, NULL, &planduration, + (es->buffers ? &bufusage : NULL), + (es->memory ? &mem_counters : NULL)); +#else ExplainOnePlan(plan, into, es, queryString, params, NULL, &planduration, (es->buffers ? 
&bufusage : NULL)); +#endif ExplainCloseGroup("PlannedStmt", "PlannedStmt", false, es); ExplainCloseGroup("Subplan", NULL, true, es); @@ -493,7 +520,7 @@ ExplainJob(CitusScanState *scanState, Job *job, ExplainState *es, { Task *task = NULL; uint64 totalReceivedTupleDataForAllTasks = 0; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { totalReceivedTupleDataForAllTasks += TaskReceivedTupleData(task); } @@ -671,7 +698,7 @@ ExplainTaskList(CitusScanState *scanState, List *taskList, ExplainState *es, } Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { RemoteExplainPlan *remoteExplain = RemoteExplain(task, es, params); remoteExplainList = lappend(remoteExplainList, remoteExplain); @@ -1251,6 +1278,21 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, BufferUsage bufusage_start, bufusage; +#if PG_VERSION_NUM >= PG_VERSION_17 + MemoryContextCounters mem_counters; + MemoryContext planner_ctx = NULL; + MemoryContext saved_ctx = NULL; + + if (es->memory) + { + /* copy paste from postgres code */ + planner_ctx = AllocSetContextCreate(CurrentMemoryContext, + "explain analyze planner context", + ALLOCSET_DEFAULT_SIZES); + saved_ctx = MemoryContextSwitchTo(planner_ctx); + } +#endif + if (es->buffers) { bufusage_start = pgBufferUsage; @@ -1284,9 +1326,23 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); } +#if PG_VERSION_NUM >= PG_VERSION_17 + if (es->memory) + { + MemoryContextSwitchTo(saved_ctx); + MemoryContextMemConsumed(planner_ctx, &mem_counters); + } + + /* run it (if needed) and produce output */ + ExplainOnePlan(plan, into, es, queryString, params, queryEnv, + &planduration, (es->buffers ? &bufusage : NULL), + (es->memory ? &mem_counters : NULL)); +#else + /* run it (if needed) and produce output */ ExplainOnePlan(plan, into, es, queryString, params, queryEnv, &planduration, (es->buffers ? &bufusage : NULL)); +#endif } @@ -1398,7 +1454,7 @@ void ResetExplainAnalyzeData(List *taskList) { Task *task = NULL; - foreach_ptr(task, taskList) + foreach_declared_ptr(task, taskList) { if (task->fetchedExplainAnalyzePlan != NULL) { @@ -1461,7 +1517,7 @@ ExplainAnalyzeTaskList(List *originalTaskList, List *explainAnalyzeTaskList = NIL; Task *originalTask = NULL; - foreach_ptr(originalTask, originalTaskList) + foreach_declared_ptr(originalTask, originalTaskList) { if (originalTask->queryCount != 1) { @@ -1699,6 +1755,21 @@ ExplainOneQuery(Query *query, int cursorOptions, BufferUsage bufusage_start, bufusage; +#if PG_VERSION_NUM >= PG_VERSION_17 + MemoryContextCounters mem_counters; + MemoryContext planner_ctx = NULL; + MemoryContext saved_ctx = NULL; + + if (es->memory) + { + /* copy paste from postgres code */ + planner_ctx = AllocSetContextCreate(CurrentMemoryContext, + "explain analyze planner context", + ALLOCSET_DEFAULT_SIZES); + saved_ctx = MemoryContextSwitchTo(planner_ctx); + } +#endif + if (es->buffers) bufusage_start = pgBufferUsage; INSTR_TIME_SET_CURRENT(planstart); @@ -1716,9 +1787,21 @@ ExplainOneQuery(Query *query, int cursorOptions, BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); } +#if PG_VERSION_NUM >= PG_VERSION_17 + if (es->memory) + { + MemoryContextSwitchTo(saved_ctx); + MemoryContextMemConsumed(planner_ctx, &mem_counters); + } + /* run it (if needed) and produce output */ + ExplainOnePlan(plan, into, es, queryString, params, queryEnv, + &planduration, (es->buffers ? 
&bufusage : NULL), + (es->memory ? &mem_counters : NULL)); +#else /* run it (if needed) and produce output */ ExplainOnePlan(plan, into, es, queryString, params, queryEnv, &planduration, (es->buffers ? &bufusage : NULL)); +#endif } } diff --git a/src/backend/distributed/planner/multi_join_order.c b/src/backend/distributed/planner/multi_join_order.c index 908ed206ea6..7f25e08cc6e 100644 --- a/src/backend/distributed/planner/multi_join_order.c +++ b/src/backend/distributed/planner/multi_join_order.c @@ -224,10 +224,10 @@ JoinOnColumns(List *currentPartitionColumnList, Var *candidateColumn, } Var *currentColumn = NULL; - foreach_ptr(currentColumn, currentPartitionColumnList) + foreach_declared_ptr(currentColumn, currentPartitionColumnList) { Node *joinClause = NULL; - foreach_ptr(joinClause, joinClauseList) + foreach_declared_ptr(joinClause, joinClauseList) { if (!NodeIsEqualsOpExpr(joinClause)) { @@ -1094,10 +1094,10 @@ SinglePartitionJoinClause(List *partitionColumnList, List *applicableJoinClauses } Var *partitionColumn = NULL; - foreach_ptr(partitionColumn, partitionColumnList) + foreach_declared_ptr(partitionColumn, partitionColumnList) { Node *applicableJoinClause = NULL; - foreach_ptr(applicableJoinClause, applicableJoinClauses) + foreach_declared_ptr(applicableJoinClause, applicableJoinClauses) { if (!NodeIsEqualsOpExpr(applicableJoinClause)) { @@ -1177,7 +1177,7 @@ OpExpr * DualPartitionJoinClause(List *applicableJoinClauses) { Node *applicableJoinClause = NULL; - foreach_ptr(applicableJoinClause, applicableJoinClauses) + foreach_declared_ptr(applicableJoinClause, applicableJoinClauses) { if (!NodeIsEqualsOpExpr(applicableJoinClause)) { @@ -1262,7 +1262,7 @@ IsApplicableJoinClause(List *leftTableIdList, uint32 rightTableId, Node *joinCla List *varList = pull_var_clause_default(joinClause); Var *var = NULL; bool joinContainsRightTable = false; - foreach_ptr(var, varList) + foreach_declared_ptr(var, varList) { uint32 columnTableId = var->varno; if (rightTableId == columnTableId) @@ -1301,7 +1301,7 @@ ApplicableJoinClauses(List *leftTableIdList, uint32 rightTableId, List *joinClau joinClauseList = JoinClauseList(joinClauseList); Node *joinClause = NULL; - foreach_ptr(joinClause, joinClauseList) + foreach_declared_ptr(joinClause, joinClauseList) { if (IsApplicableJoinClause(leftTableIdList, rightTableId, joinClause)) { diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index 76e38237ad6..393a5a0d3e5 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -414,7 +414,7 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan) /* pull up collect nodes and merge duplicate collects */ List *collectNodeList = FindNodesOfType(logicalPlanNode, T_MultiCollect); MultiCollect *collectNode = NULL; - foreach_ptr(collectNode, collectNodeList) + foreach_declared_ptr(collectNode, collectNodeList) { PullUpCollectLoop(collectNode); } @@ -436,7 +436,7 @@ MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan) List *tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable); MultiTable *tableNode = NULL; - foreach_ptr(tableNode, tableNodeList) + foreach_declared_ptr(tableNode, tableNodeList) { if (tableNode->relationId == SUBQUERY_RELATION_ID) { @@ -542,7 +542,7 @@ OrSelectClauseList(List *selectClauseList) List *orSelectClauseList = NIL; Node *selectClause = NULL; - foreach_ptr(selectClause, selectClauseList) + 
foreach_declared_ptr(selectClause, selectClauseList) { bool orClause = is_orclause(selectClause); if (orClause) @@ -968,7 +968,7 @@ SelectClauseTableIdList(List *selectClauseList) List *tableIdList = NIL; Node *selectClause = NULL; - foreach_ptr(selectClause, selectClauseList) + foreach_declared_ptr(selectClause, selectClauseList) { List *selectColumnList = pull_var_clause_default(selectClause); @@ -1077,7 +1077,7 @@ TableIdListColumns(List *tableIdList, List *columnList) List *tableColumnList = NIL; Var *column = NULL; - foreach_ptr(column, columnList) + foreach_declared_ptr(column, columnList) { int columnTableId = (int) column->varno; @@ -1103,7 +1103,7 @@ TableIdListSelectClauses(List *tableIdList, List *selectClauseList) List *tableSelectClauseList = NIL; Node *selectClause = NULL; - foreach_ptr(selectClause, selectClauseList) + foreach_declared_ptr(selectClause, selectClauseList) { List *selectColumnList = pull_var_clause_default(selectClause); if (list_length(selectColumnList) == 0) @@ -1425,7 +1425,7 @@ MasterExtendedOpNode(MultiExtendedOp *originalOpNode, /* iterate over original target entries */ TargetEntry *originalTargetEntry = NULL; - foreach_ptr(originalTargetEntry, targetEntryList) + foreach_declared_ptr(originalTargetEntry, targetEntryList) { TargetEntry *newTargetEntry = flatCopyTargetEntry(originalTargetEntry); Expr *originalExpression = originalTargetEntry->expr; @@ -1598,7 +1598,7 @@ MasterAggregateExpression(Aggref *originalAggregate, Aggref *aggregate = (Aggref *) copyObject(originalAggregate); TargetEntry *targetEntry; - foreach_ptr(targetEntry, aggregate->args) + foreach_declared_ptr(targetEntry, aggregate->args) { targetEntry->expr = (Expr *) makeVar(masterTableId, walkerContext->columnId, @@ -1611,7 +1611,7 @@ MasterAggregateExpression(Aggref *originalAggregate, aggregate->aggdirectargs = NIL; Expr *directarg; - foreach_ptr(directarg, originalAggregate->aggdirectargs) + foreach_declared_ptr(directarg, originalAggregate->aggdirectargs) { /* * Need to replace nodes that contain any Vars with Vars referring @@ -1662,7 +1662,7 @@ MasterAggregateExpression(Aggref *originalAggregate, /* determine unique vars that were placed in target list by worker */ Var *column = NULL; - foreach_ptr(column, varList) + foreach_declared_ptr(column, varList) { uniqueVarList = list_append_unique(uniqueVarList, copyObject(column)); } @@ -1672,12 +1672,12 @@ MasterAggregateExpression(Aggref *originalAggregate, * worker query target entry column index. 
*/ Var *columnToUpdate = NULL; - foreach_ptr(columnToUpdate, varList) + foreach_declared_ptr(columnToUpdate, varList) { int columnIndex = 0; Var *currentVar = NULL; - foreach_ptr(currentVar, uniqueVarList) + foreach_declared_ptr(currentVar, uniqueVarList) { if (equal(columnToUpdate, currentVar)) { @@ -2526,7 +2526,7 @@ ProcessTargetListForWorkerQuery(List *targetEntryList, /* iterate over original target entries */ TargetEntry *originalTargetEntry = NULL; - foreach_ptr(originalTargetEntry, targetEntryList) + foreach_declared_ptr(originalTargetEntry, targetEntryList) { Expr *originalExpression = originalTargetEntry->expr; List *newExpressionList = NIL; @@ -2733,7 +2733,7 @@ ProcessWindowFunctionPullUpForWorkerQuery(List *windowClause, List *columnList = pull_var_clause_default((Node *) windowClause); Expr *newExpression = NULL; - foreach_ptr(newExpression, columnList) + foreach_declared_ptr(newExpression, columnList) { TargetEntry *newTargetEntry = makeNode(TargetEntry); @@ -2823,7 +2823,7 @@ bool TargetListHasAggregates(List *targetEntryList) { TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, targetEntryList) + foreach_declared_ptr(targetEntry, targetEntryList) { Expr *targetExpr = targetEntry->expr; bool hasAggregates = contain_aggs_of_level((Node *) targetExpr, 0); @@ -2867,7 +2867,7 @@ ExpandWorkerTargetEntry(List *expressionList, TargetEntry *originalTargetEntry, { /* now create target entries for each new expression */ Expr *newExpression = NULL; - foreach_ptr(newExpression, expressionList) + foreach_declared_ptr(newExpression, expressionList) { /* generate and add the new target entry to the target list */ TargetEntry *newTargetEntry = @@ -2904,7 +2904,7 @@ GetNextSortGroupRef(List *targetEntryList) /* find max of sort group ref index */ TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, targetEntryList) + foreach_declared_ptr(targetEntry, targetEntryList) { if (targetEntry->ressortgroupref > nextSortGroupRefIndex) { @@ -3060,13 +3060,13 @@ WorkerAggregateExpressionList(Aggref *originalAggregate, if (walkerContext->extendedOpNodeProperties->pullUpIntermediateRows) { TargetEntry *targetEntry; - foreach_ptr(targetEntry, originalAggregate->args) + foreach_declared_ptr(targetEntry, originalAggregate->args) { workerAggregateList = lappend(workerAggregateList, targetEntry->expr); } Expr *directarg; - foreach_ptr(directarg, originalAggregate->aggdirectargs) + foreach_declared_ptr(directarg, originalAggregate->aggdirectargs) { /* * The worker aggregation should execute any node that contains any @@ -3099,7 +3099,7 @@ WorkerAggregateExpressionList(Aggref *originalAggregate, List *columnList = pull_var_clause_default((Node *) aggregate); Var *column = NULL; - foreach_ptr(column, columnList) + foreach_declared_ptr(column, columnList) { workerAggregateList = list_append_unique(workerAggregateList, column); } @@ -3326,7 +3326,7 @@ WorkerAggregateExpressionList(Aggref *originalAggregate, rowExpr->colnames = NIL; TargetEntry *arg = NULL; - foreach_ptr(arg, originalAggregate->args) + foreach_declared_ptr(arg, originalAggregate->args) { rowExpr->args = lappend(rowExpr->args, copyObject(arg->expr)); } @@ -3830,7 +3830,7 @@ HasNonDistributableAggregates(MultiNode *logicalPlanNode) pull_var_clause(havingQual, PVC_INCLUDE_AGGREGATES)); Node *expression = NULL; - foreach_ptr(expression, expressionList) + foreach_declared_ptr(expression, expressionList) { /* only consider aggregate expressions */ if (!IsA(expression, Aggref)) @@ -3936,7 +3936,7 @@ 
DeferErrorIfHasNonDistributableAggregates(MultiNode *logicalPlanNode) pull_var_clause(havingQual, PVC_INCLUDE_AGGREGATES)); Node *expression = NULL; - foreach_ptr(expression, expressionList) + foreach_declared_ptr(expression, expressionList) { /* only consider aggregate expressions */ if (!IsA(expression, Aggref)) @@ -4079,7 +4079,7 @@ DeferErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression, List *columnList = pull_var_clause_default(aggregateArgument); Var *column = NULL; - foreach_ptr(column, columnList) + foreach_declared_ptr(column, columnList) { if (column->varattno <= 0) { @@ -4095,7 +4095,7 @@ DeferErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression, List *multiTableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable); MultiTable *multiTable = NULL; - foreach_ptr(multiTable, multiTableNodeList) + foreach_declared_ptr(multiTable, multiTableNodeList) { if (multiTable->relationId == SUBQUERY_RELATION_ID || multiTable->relationId == SUBQUERY_PUSHDOWN_RELATION_ID) @@ -4251,7 +4251,7 @@ TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode, bool distinctSupported = true; MultiTable *tableNode = NULL; - foreach_ptr(tableNode, tableNodeList) + foreach_declared_ptr(tableNode, tableNodeList) { Oid relationId = tableNode->relationId; bool tableDistinctSupported = false; @@ -4327,7 +4327,7 @@ GroupedByColumn(List *groupClauseList, List *targetList, Var *column) } SortGroupClause *groupClause = NULL; - foreach_ptr(groupClause, groupClauseList) + foreach_declared_ptr(groupClause, groupClauseList) { TargetEntry *groupTargetEntry = get_sortgroupclause_tle(groupClause, targetList); @@ -4359,7 +4359,7 @@ SubqueryMultiTableList(MultiNode *multiNode) List *multiTableNodeList = FindNodesOfType(multiNode, T_MultiTable); MultiTable *multiTable = NULL; - foreach_ptr(multiTable, multiTableNodeList) + foreach_declared_ptr(multiTable, multiTableNodeList) { Query *subquery = multiTable->subquery; @@ -4383,7 +4383,7 @@ GroupTargetEntryList(List *groupClauseList, List *targetEntryList) List *groupTargetEntryList = NIL; SortGroupClause *groupClause = NULL; - foreach_ptr(groupClause, groupClauseList) + foreach_declared_ptr(groupClause, groupClauseList) { TargetEntry *groupTargetEntry = get_sortgroupclause_tle(groupClause, targetEntryList); @@ -4585,7 +4585,7 @@ FindReferencedTableColumn(Expr *columnExpression, List *parentQueryList, Query * } CommonTableExpr *candidateCte = NULL; - foreach_ptr(candidateCte, cteList) + foreach_declared_ptr(candidateCte, cteList) { if (strcmp(candidateCte->ctename, rangeTableEntry->ctename) == 0) { @@ -4878,7 +4878,7 @@ HasOrderByAggregate(List *sortClauseList, List *targetList) bool hasOrderByAggregate = false; SortGroupClause *sortClause = NULL; - foreach_ptr(sortClause, sortClauseList) + foreach_declared_ptr(sortClause, sortClauseList) { Node *sortExpression = get_sortgroupclause_expr(sortClause, targetList); @@ -4904,7 +4904,7 @@ HasOrderByNonCommutativeAggregate(List *sortClauseList, List *targetList) bool hasOrderByNonCommutativeAggregate = false; SortGroupClause *sortClause = NULL; - foreach_ptr(sortClause, sortClauseList) + foreach_declared_ptr(sortClause, sortClauseList) { Node *sortExpression = get_sortgroupclause_expr(sortClause, targetList); @@ -4944,7 +4944,7 @@ HasOrderByComplexExpression(List *sortClauseList, List *targetList) bool hasOrderByComplexExpression = false; SortGroupClause *sortClause = NULL; - foreach_ptr(sortClause, sortClauseList) + foreach_declared_ptr(sortClause, sortClauseList) { Node 
*sortExpression = get_sortgroupclause_expr(sortClause, targetList); @@ -4986,7 +4986,7 @@ HasOrderByHllType(List *sortClauseList, List *targetList) Oid hllTypeId = TypeOid(hllSchemaOid, HLL_TYPE_NAME); SortGroupClause *sortClause = NULL; - foreach_ptr(sortClause, sortClauseList) + foreach_declared_ptr(sortClause, sortClauseList) { Node *sortExpression = get_sortgroupclause_expr(sortClause, targetList); @@ -5070,12 +5070,12 @@ IsGroupBySubsetOfDistinct(List *groupClauses, List *distinctClauses) } SortGroupClause *groupClause = NULL; - foreach_ptr(groupClause, groupClauses) + foreach_declared_ptr(groupClause, groupClauses) { bool isFound = false; SortGroupClause *distinctClause = NULL; - foreach_ptr(distinctClause, distinctClauses) + foreach_declared_ptr(distinctClause, distinctClauses) { if (groupClause->tleSortGroupRef == distinctClause->tleSortGroupRef) { diff --git a/src/backend/distributed/planner/multi_logical_planner.c b/src/backend/distributed/planner/multi_logical_planner.c index 5201195c715..b7bee3aadcb 100644 --- a/src/backend/distributed/planner/multi_logical_planner.c +++ b/src/backend/distributed/planner/multi_logical_planner.c @@ -1414,7 +1414,7 @@ IsJoinClause(Node *clause) } Var *initialVar = castNode(Var, linitial(varList)); - foreach_ptr(var, varList) + foreach_declared_ptr(var, varList) { if (var->varno != initialVar->varno) { diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index fb7f844c7b4..dee3464cf14 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -547,7 +547,7 @@ BuildJobQuery(MultiNode *multiNode, List *dependentJobList) List *sortClauseList = NIL; Node *limitCount = NULL; Node *limitOffset = NULL; - LimitOption limitOption = LIMIT_OPTION_DEFAULT; + LimitOption limitOption = LIMIT_OPTION_COUNT; Node *havingQual = NULL; bool hasDistinctOn = false; List *distinctClause = NIL; @@ -895,7 +895,7 @@ WrapUngroupedVarsInAnyValueAggregate(Node *expression, List *groupClauseList, * subexpression equality check. */ TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, context.groupByTargetEntryList) + foreach_declared_ptr(targetEntry, context.groupByTargetEntryList) { if (!IsA(targetEntry->expr, Var)) { @@ -953,7 +953,7 @@ AddAnyValueAggregates(Node *node, AddAnyValueAggregatesContext *context) * Check whether this Var appears in the GROUP BY. */ TargetEntry *groupByTargetEntry = NULL; - foreach_ptr(groupByTargetEntry, context->groupByTargetEntryList) + foreach_declared_ptr(groupByTargetEntry, context->groupByTargetEntryList) { if (!IsA(groupByTargetEntry->expr, Var)) { @@ -996,7 +996,7 @@ AddAnyValueAggregates(Node *node, AddAnyValueAggregatesContext *context) * Otherwise, continue to descend into subexpressions. 
*/ TargetEntry *groupByTargetEntry = NULL; - foreach_ptr(groupByTargetEntry, context->groupByTargetEntryList) + foreach_declared_ptr(groupByTargetEntry, context->groupByTargetEntryList) { if (equal(node, groupByTargetEntry->expr)) { @@ -1192,7 +1192,7 @@ QueryJoinTree(MultiNode *multiNode, List *dependentJobList, List **rangeTableLis List *funcCollations = NIL; TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, dependentTargetList) + foreach_declared_ptr(targetEntry, dependentTargetList) { Node *expr = (Node *) targetEntry->expr; @@ -2237,7 +2237,7 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId, } ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, prunedShardList) + foreach_declared_ptr(shardInterval, prunedShardList) { int shardIndex = shardInterval->shardIndex; @@ -2305,7 +2305,7 @@ static bool IsInnerTableOfOuterJoin(RelationRestriction *relationRestriction) { RestrictInfo *joinInfo = NULL; - foreach_ptr(joinInfo, relationRestriction->relOptInfo->joininfo) + foreach_declared_ptr(joinInfo, relationRestriction->relOptInfo->joininfo) { if (joinInfo->outer_relids == NULL) { @@ -3473,7 +3473,7 @@ FetchEqualityAttrNumsForList(List *nodeList) List *attributeNums = NIL; Node *node = NULL; bool hasAtLeastOneEquality = false; - foreach_ptr(node, nodeList) + foreach_declared_ptr(node, nodeList) { List *fetchedEqualityAttrNums = FetchEqualityAttrNumsForRTE(node); @@ -3531,7 +3531,7 @@ FetchEqualityAttrNumsForRTEBoolExpr(BoolExpr *boolExpr) List *attributeNums = NIL; bool hasEquality = true; Node *arg = NULL; - foreach_ptr(arg, boolExpr->args) + foreach_declared_ptr(arg, boolExpr->args) { List *attributeNumsInSubExpression = FetchEqualityAttrNumsForRTE(arg); if (boolExpr->boolop == AND_EXPR) @@ -3622,7 +3622,7 @@ JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *dependen * tables and this new one. 
*/ Node *nextJoinClause = NULL; - foreach_ptr(nextJoinClause, nextJoinClauseList) + foreach_declared_ptr(nextJoinClause, nextJoinClauseList) { if (!NodeIsEqualsOpExpr(nextJoinClause)) { @@ -4183,7 +4183,7 @@ FetchTaskResultNameList(List *mapOutputFetchTaskList) List *resultNameList = NIL; Task *mapOutputFetchTask = NULL; - foreach_ptr(mapOutputFetchTask, mapOutputFetchTaskList) + foreach_declared_ptr(mapOutputFetchTask, mapOutputFetchTaskList) { Task *mapTask = linitial(mapOutputFetchTask->dependentTaskList); int partitionId = mapOutputFetchTask->partitionId; @@ -4344,7 +4344,7 @@ PartitionColumnIndex(Var *targetVar, List *targetList) { TargetEntry *targetEntry = NULL; int resNo = 1; - foreach_ptr(targetEntry, targetList) + foreach_declared_ptr(targetEntry, targetList) { if (IsA(targetEntry->expr, Var)) { @@ -4571,7 +4571,7 @@ RowModifyLevelForQuery(Query *query) { /* skip checking for INSERT as those CTEs are recursively planned */ CommonTableExpr *cte = NULL; - foreach_ptr(cte, query->cteList) + foreach_declared_ptr(cte, query->cteList) { Query *cteQuery = (Query *) cte->ctequery; diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index 44f955a3227..96a946a34f2 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -851,7 +851,7 @@ ModifiesLocalTableWithRemoteCitusLocalTable(List *rangeTableList) bool containsRemoteCitusLocalTable = false; RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, rangeTableList) + foreach_declared_ptr(rangeTableEntry, rangeTableList) { if (!IsRecursivelyPlannableRelation(rangeTableEntry)) { @@ -982,7 +982,7 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer ContainsLocalTableDistributedTableJoin(queryTree->rtable); RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, rangeTableList) + foreach_declared_ptr(rangeTableEntry, rangeTableList) { if (rangeTableEntry->rtekind == RTE_RELATION) { @@ -1744,7 +1744,7 @@ RouterInsertTaskList(Query *query, bool parametersInQueryResolved, } ModifyRoute *modifyRoute = NULL; - foreach_ptr(modifyRoute, modifyRouteList) + foreach_declared_ptr(modifyRoute, modifyRouteList) { Task *modifyTask = CreateTask(MODIFY_TASK); modifyTask->anchorShardId = modifyRoute->shardId; @@ -2137,7 +2137,7 @@ SingleShardTaskList(Query *query, uint64 jobId, List *relationShardList, /* assume ErrorIfQueryHasUnroutableModifyingCTE checked query already */ CommonTableExpr *cte = NULL; - foreach_ptr(cte, query->cteList) + foreach_declared_ptr(cte, query->cteList) { Query *cteQuery = (Query *) cte->ctequery; @@ -2466,7 +2466,7 @@ AllShardsColocated(List *relationShardList) int colocationId = -1; CitusTableType tableType = ANY_CITUS_TABLE_TYPE; - foreach_ptr(relationShard, relationShardList) + foreach_declared_ptr(relationShard, relationShardList) { Oid relationId = relationShard->relationId; uint64 shardId = relationShard->shardId; @@ -2591,7 +2591,7 @@ CreateTaskPlacementListForShardIntervals(List *shardIntervalListList, bool shard * If there is a local table, we only allow the local placement to * be used. If there is none, we disallow the query. 
*/ - foreach_ptr(taskPlacement, shardPlacementList) + foreach_declared_ptr(taskPlacement, shardPlacementList) { if (taskPlacement->groupId == GetLocalGroupId()) { @@ -3008,7 +3008,7 @@ PlacementsForWorkersContainingAllShards(List *shardIntervalListList) List *currentPlacementList = NIL; List *shardIntervalList = NIL; - foreach_ptr(shardIntervalList, shardIntervalListList) + foreach_declared_ptr(shardIntervalList, shardIntervalListList) { if (shardIntervalList == NIL) { @@ -3907,7 +3907,7 @@ ErrorIfQueryHasUnroutableModifyingCTE(Query *queryTree) char replicationModel = 0; CommonTableExpr *cte = NULL; - foreach_ptr(cte, queryTree->cteList) + foreach_declared_ptr(cte, queryTree->cteList) { Query *cteQuery = (Query *) cte->ctequery; diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c index 2eda4e42a76..65de8680ccb 100644 --- a/src/backend/distributed/planner/query_pushdown_planning.c +++ b/src/backend/distributed/planner/query_pushdown_planning.c @@ -1187,7 +1187,7 @@ DeferErrorIfUnsupportedTableCombination(Query *queryTree) ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList); - foreach_int(joinTreeTableIndex, joinTreeTableIndexList) + foreach_declared_int(joinTreeTableIndex, joinTreeTableIndexList) { /* * Join tree's range table index starts from 1 in the query tree. But, @@ -2010,7 +2010,7 @@ CreateSubqueryTargetListAndAdjustVars(List *columnList) Var *column = NULL; List *subqueryTargetEntryList = NIL; - foreach_ptr(column, columnList) + foreach_declared_ptr(column, columnList) { /* * To avoid adding the same column multiple times, we first check whether there @@ -2064,7 +2064,7 @@ static AttrNumber FindResnoForVarInTargetList(List *targetList, int varno, int varattno) { TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, targetList) + foreach_declared_ptr(targetEntry, targetList) { if (!IsA(targetEntry->expr, Var)) { @@ -2127,7 +2127,7 @@ PartitionColumnForPushedDownSubquery(Query *query) List *targetEntryList = query->targetList; TargetEntry *targetEntry = NULL; - foreach_ptr(targetEntry, targetEntryList) + foreach_declared_ptr(targetEntry, targetEntryList) { if (targetEntry->resjunk) { diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index 9f520fa5f51..9335b5ffc94 100644 --- a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -1736,7 +1736,7 @@ NodeContainsSubqueryReferencingOuterQuery(Node *node) ExtractSublinkWalker(node, &sublinks); SubLink *sublink; - foreach_ptr(sublink, sublinks) + foreach_declared_ptr(sublink, sublinks) { if (ContainsReferencesToOuterQuery(castNode(Query, sublink->subselect))) { @@ -1894,7 +1894,7 @@ GenerateRequiredColNamesFromTargetList(List *targetList) { TargetEntry *entry = NULL; List *innerSubqueryColNames = NIL; - foreach_ptr(entry, targetList) + foreach_declared_ptr(entry, targetList) { if (IsA(entry->expr, Var)) { @@ -1921,7 +1921,7 @@ UpdateVarNosInNode(Node *node, Index newVarNo) List *varList = pull_var_clause(node, PVC_RECURSE_AGGREGATES | PVC_RECURSE_PLACEHOLDERS); Var *var = NULL; - foreach_ptr(var, varList) + foreach_declared_ptr(var, varList) { var->varno = newVarNo; } @@ -1958,7 +1958,7 @@ ContainsLocalTableDistributedTableJoin(List *rangeTableList) bool containsDistributedTable = false; RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, rangeTableList) + 
foreach_declared_ptr(rangeTableEntry, rangeTableList) { if (FindNodeMatchingCheckFunctionInRangeTableList(list_make1(rangeTableEntry), IsDistributedOrReferenceTableRTE)) diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index 83d7cbcdb77..89516640aa0 100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -1516,7 +1516,7 @@ ParentCountPriorToAppendRel(List *appendRelList, AppendRelInfo *targetAppendRelI int targetParentIndex = targetAppendRelInfo->parent_relid; Bitmapset *parent_ids = NULL; AppendRelInfo *appendRelInfo = NULL; - foreach_ptr(appendRelInfo, appendRelList) + foreach_declared_ptr(appendRelInfo, appendRelList) { int curParentIndex = appendRelInfo->parent_relid; if (curParentIndex <= targetParentIndex) @@ -1962,7 +1962,7 @@ AllDistributedRelationsInRestrictionContextColocated( List *relationIdList = NIL; /* check whether all relations exists in the main restriction list */ - foreach_ptr(relationRestriction, restrictionContext->relationRestrictionList) + foreach_declared_ptr(relationRestriction, restrictionContext->relationRestrictionList) { relationIdList = lappend_oid(relationIdList, relationRestriction->relationId); } @@ -1981,7 +1981,7 @@ AllDistributedRelationsInRTEListColocated(List *rangeTableEntryList) RangeTblEntry *rangeTableEntry = NULL; List *relationIdList = NIL; - foreach_ptr(rangeTableEntry, rangeTableEntryList) + foreach_declared_ptr(rangeTableEntry, rangeTableEntryList) { relationIdList = lappend_oid(relationIdList, rangeTableEntry->relid); } @@ -2000,7 +2000,7 @@ AllDistributedRelationsInListColocated(List *relationList) int initialColocationId = INVALID_COLOCATION_ID; Oid relationId = InvalidOid; - foreach_oid(relationId, relationList) + foreach_declared_oid(relationId, relationList) { if (!IsCitusTable(relationId)) { @@ -2155,7 +2155,7 @@ GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry, List *restrictExprList = NIL; RestrictInfo *restrictInfo = NULL; - foreach_ptr(restrictInfo, baseRestrictInfo) + foreach_declared_ptr(restrictInfo, baseRestrictInfo) { Expr *restrictionClause = restrictInfo->clause; @@ -2199,7 +2199,7 @@ GetRestrictInfoListForRelation(RangeTblEntry *rangeTblEntry, Expr *copyOfRestrictClause = (Expr *) copyObject((Node *) restrictionClause); List *varClauses = pull_var_clause_default((Node *) copyOfRestrictClause); Var *column = NULL; - foreach_ptr(column, varClauses) + foreach_declared_ptr(column, varClauses) { column->varno = SINGLE_RTE_INDEX; column->varnosyn = SINGLE_RTE_INDEX; diff --git a/src/backend/distributed/planner/shard_pruning.c b/src/backend/distributed/planner/shard_pruning.c index e68ac72b01c..2fd8ffdd625 100644 --- a/src/backend/distributed/planner/shard_pruning.c +++ b/src/backend/distributed/planner/shard_pruning.c @@ -1390,7 +1390,7 @@ DeepCopyShardIntervalList(List *originalShardIntervalList) List *copiedShardIntervalList = NIL; ShardInterval *originalShardInterval = NULL; - foreach_ptr(originalShardInterval, originalShardIntervalList) + foreach_declared_ptr(originalShardInterval, originalShardIntervalList) { ShardInterval *copiedShardInterval = CopyShardInterval(originalShardInterval); diff --git a/src/backend/distributed/progress/multi_progress.c b/src/backend/distributed/progress/multi_progress.c index 64e0a5b4762..5d1550ddda2 100644 --- a/src/backend/distributed/progress/multi_progress.c +++ 
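/*
 * Context for the mechanical foreach_ptr/foreach_int/foreach_oid ->
 * foreach_declared_* renames running through the hunks above and below:
 * PostgreSQL 17 introduces its own foreach_ptr/foreach_int/foreach_oid macros
 * in pg_list.h, which declare the loop variable themselves, so Citus's
 * longstanding macros (which iterate over a variable declared before the
 * loop) appear to be renamed to avoid the name clash. A minimal usage sketch,
 * assuming the PG17 macro signature from pg_list.h; PrintNames() is a
 * hypothetical helper used only for illustration and is not part of the patch.
 */
#include "postgres.h"

#include "nodes/pg_list.h"

#include "distributed/listutils.h"

static void
PrintNames(List *nameList)
{
	/* Citus style: the loop variable is declared before the loop */
	char *name = NULL;
	foreach_declared_ptr(name, nameList)
	{
		elog(DEBUG1, "name: %s", name);
	}

#if PG_VERSION_NUM >= 170000
	/* PostgreSQL 17 style: the macro declares "char *name2" itself */
	foreach_ptr(char, name2, nameList)
	{
		elog(DEBUG1, "name: %s", name2);
	}
#endif
}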
b/src/backend/distributed/progress/multi_progress.c @@ -286,7 +286,7 @@ void DetachFromDSMSegments(List *dsmSegmentList) { dsm_segment *dsmSegment = NULL; - foreach_ptr(dsmSegment, dsmSegmentList) + foreach_declared_ptr(dsmSegment, dsmSegmentList) { dsm_detach(dsmSegment); } diff --git a/src/backend/distributed/relay/relay_event_utility.c b/src/backend/distributed/relay/relay_event_utility.c index d0267025bcc..630c783e544 100644 --- a/src/backend/distributed/relay/relay_event_utility.c +++ b/src/backend/distributed/relay/relay_event_utility.c @@ -150,7 +150,7 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) AppendShardIdToName(relationName, shardId); AlterTableCmd *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { if (command->subtype == AT_AddConstraint) { @@ -162,7 +162,7 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) { ColumnDef *columnDefinition = (ColumnDef *) command->def; Constraint *constraint = NULL; - foreach_ptr(constraint, columnDefinition->constraints) + foreach_declared_ptr(constraint, columnDefinition->constraints) { RelayEventExtendConstraintAndIndexNames(alterTableStmt, constraint, shardId); @@ -385,7 +385,7 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) { List *shardStatisticsList = NIL; List *objectNameList = NULL; - foreach_ptr(objectNameList, dropStmt->objects) + foreach_declared_ptr(objectNameList, dropStmt->objects) { RangeVar *stat = makeRangeVarFromNameList(objectNameList); @@ -415,7 +415,7 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) grantStmt->objtype == OBJECT_TABLE) { RangeVar *relation = NULL; - foreach_ptr(relation, grantStmt->objects) + foreach_declared_ptr(relation, grantStmt->objects) { char **relationName = &(relation->relname); char **relationSchemaName = &(relation->schemaname); @@ -673,7 +673,7 @@ RelayEventExtendNamesForInterShardCommands(Node *parseTree, uint64 leftShardId, List *commandList = alterTableStmt->cmds; AlterTableCmd *command = NULL; - foreach_ptr(command, commandList) + foreach_declared_ptr(command, commandList) { char **referencedTableName = NULL; char **relationSchemaName = NULL; @@ -693,7 +693,7 @@ RelayEventExtendNamesForInterShardCommands(Node *parseTree, uint64 leftShardId, List *columnConstraints = columnDefinition->constraints; Constraint *constraint = NULL; - foreach_ptr(constraint, columnConstraints) + foreach_declared_ptr(constraint, columnConstraints) { if (constraint->contype == CONSTR_FOREIGN) { diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index 08e6c557308..7189216d09e 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -282,7 +282,7 @@ CreateGroupedLogicalRepTargetsHash(List *logicalRepTargetList) { HTAB *logicalRepTargetsHash = CreateSimpleHash(uint32, GroupedLogicalRepTargets); LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { bool found = false; GroupedLogicalRepTargets *groupedLogicalRepTargets = @@ -413,7 +413,7 @@ CreateShardMovePublicationInfoHash(WorkerNode *targetNode, List *shardIntervals) { HTAB *publicationInfoHash = CreateSimpleHash(NodeAndOwner, PublicationInfo); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervals) + 
foreach_declared_ptr(shardInterval, shardIntervals) { NodeAndOwner key; key.nodeId = targetNode->nodeId; @@ -474,7 +474,7 @@ CreateShardMoveLogicalRepTargetList(HTAB *publicationInfoHash, List *shardList) } ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardList) + foreach_declared_ptr(shardInterval, shardList) { NodeAndOwner key; key.nodeId = nodeId; @@ -552,7 +552,7 @@ void CreateReplicaIdentities(List *logicalRepTargetList) { LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { MultiConnection *superuserConnection = target->superuserConnection; CreateReplicaIdentitiesOnNode( @@ -576,7 +576,7 @@ CreateReplicaIdentitiesOnNode(List *shardList, char *nodeName, int32 nodePort) MemoryContext oldContext = MemoryContextSwitchTo(localContext); ShardInterval *shardInterval; - foreach_ptr(shardInterval, shardList) + foreach_declared_ptr(shardInterval, shardList) { uint64 shardId = shardInterval->shardId; Oid relationId = shardInterval->relationId; @@ -725,10 +725,10 @@ ExecuteCreateIndexCommands(List *logicalRepTargetList) { List *taskList = NIL; LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, target->newShards) + foreach_declared_ptr(shardInterval, target->newShards) { Oid relationId = shardInterval->relationId; @@ -787,10 +787,10 @@ ExecuteCreateConstraintsBackedByIndexCommands(List *logicalRepTargetList) MemoryContext oldContext = MemoryContextSwitchTo(localContext); LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, target->newShards) + foreach_declared_ptr(shardInterval, target->newShards) { Oid relationId = shardInterval->relationId; @@ -873,10 +873,10 @@ ExecuteClusterOnCommands(List *logicalRepTargetList) { List *taskList = NIL; LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, target->newShards) + foreach_declared_ptr(shardInterval, target->newShards) { Oid relationId = shardInterval->relationId; @@ -925,10 +925,10 @@ ExecuteCreateIndexStatisticsCommands(List *logicalRepTargetList) MemoryContext oldContext = MemoryContextSwitchTo(localContext); LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, target->newShards) + foreach_declared_ptr(shardInterval, target->newShards) { Oid relationId = shardInterval->relationId; @@ -983,10 +983,10 @@ ExecuteRemainingPostLoadTableCommands(List *logicalRepTargetList) MemoryContext oldContext = MemoryContextSwitchTo(localContext); LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, target->newShards) + foreach_declared_ptr(shardInterval, target->newShards) { Oid relationId = shardInterval->relationId; @@ -1042,10 +1042,10 @@ CreatePartitioningHierarchy(List *logicalRepTargetList) MemoryContext oldContext = MemoryContextSwitchTo(localContext); LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + 
foreach_declared_ptr(target, logicalRepTargetList) { ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, target->newShards) + foreach_declared_ptr(shardInterval, target->newShards) { if (PartitionTable(shardInterval->relationId)) { @@ -1100,14 +1100,14 @@ CreateUncheckedForeignKeyConstraints(List *logicalRepTargetList) * Iterate over all the shards in the shard group. */ LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { ShardInterval *shardInterval = NULL; /* * Iterate on split shards list for a given shard and create constraints. */ - foreach_ptr(shardInterval, target->newShards) + foreach_declared_ptr(shardInterval, target->newShards) { List *commandList = CopyShardForeignConstraintCommandList( shardInterval); @@ -1320,7 +1320,7 @@ CreatePublications(MultiConnection *connection, quote_identifier(entry->name)); ShardInterval *shard = NULL; - foreach_ptr(shard, entry->shardIntervals) + foreach_declared_ptr(shard, entry->shardIntervals) { char *shardName = ConstructQualifiedShardName(shard); @@ -1429,7 +1429,7 @@ CreateReplicationSlots(MultiConnection *sourceConnection, ReplicationSlotInfo *firstReplicationSlot = NULL; char *snapshot = NULL; LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { ReplicationSlotInfo *replicationSlot = target->replicationSlot; @@ -1481,7 +1481,7 @@ CreateSubscriptions(MultiConnection *sourceConnection, List *logicalRepTargetList) { LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { int ownerId = target->tableOwnerId; @@ -1603,7 +1603,7 @@ void EnableSubscriptions(List *logicalRepTargetList) { LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { ExecuteCriticalRemoteCommand(target->superuserConnection, psprintf( "ALTER SUBSCRIPTION %s ENABLE", @@ -1737,7 +1737,7 @@ CreateGroupedLogicalRepTargetsConnections(HTAB *groupedLogicalRepTargetsHash, groupedLogicalRepTargets->superuserConnection = superuserConnection; LogicalRepTarget *target = NULL; - foreach_ptr(target, groupedLogicalRepTargets->logicalRepTargetList) + foreach_declared_ptr(target, groupedLogicalRepTargets->logicalRepTargetList) { target->superuserConnection = superuserConnection; } @@ -1774,7 +1774,7 @@ SubscriptionNamesValueList(List *logicalRepTargetList) bool first = true; LogicalRepTarget *target = NULL; - foreach_ptr(target, logicalRepTargetList) + foreach_declared_ptr(target, logicalRepTargetList) { if (!first) { diff --git a/src/backend/distributed/shardsplit/shardsplit_decoder.c b/src/backend/distributed/shardsplit/shardsplit_decoder.c index f14f105576c..20dd01b0c76 100644 --- a/src/backend/distributed/shardsplit/shardsplit_decoder.c +++ b/src/backend/distributed/shardsplit/shardsplit_decoder.c @@ -14,6 +14,8 @@ #include "utils/lsyscache.h" #include "utils/typcache.h" +#include "pg_version_constants.h" + #include "distributed/listutils.h" #include "distributed/metadata/distobject.h" #include "distributed/shardinterval_utils.h" @@ -180,6 +182,43 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, } Oid targetRelationOid = InvalidOid; + +#if PG_VERSION_NUM >= PG_VERSION_17 + switch (change->action) + { + case REORDER_BUFFER_CHANGE_INSERT: + { + HeapTuple newTuple = change->data.tp.newtuple; + targetRelationOid = FindTargetRelationOid(relation, 
newTuple, + replicationSlotName); + break; + } + + /* updating non-partition column value */ + case REORDER_BUFFER_CHANGE_UPDATE: + { + HeapTuple newTuple = change->data.tp.newtuple; + targetRelationOid = FindTargetRelationOid(relation, newTuple, + replicationSlotName); + break; + } + + case REORDER_BUFFER_CHANGE_DELETE: + { + HeapTuple oldTuple = change->data.tp.oldtuple; + targetRelationOid = FindTargetRelationOid(relation, oldTuple, + replicationSlotName); + + break; + } + + /* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */ + default: + ereport(ERROR, errmsg( + "Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE", + change->action)); + } +#else switch (change->action) { case REORDER_BUFFER_CHANGE_INSERT: @@ -214,6 +253,7 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, "Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE", change->action)); } +#endif /* Current replication slot is not responsible for handling the change */ if (targetRelationOid == InvalidOid) @@ -231,6 +271,62 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, TupleDesc targetRelationDesc = RelationGetDescr(targetRelation); if (sourceRelationDesc->natts > targetRelationDesc->natts) { +#if PG_VERSION_NUM >= PG_VERSION_17 + switch (change->action) + { + case REORDER_BUFFER_CHANGE_INSERT: + { + HeapTuple sourceRelationNewTuple = change->data.tp.newtuple; + HeapTuple targetRelationNewTuple = GetTupleForTargetSchema( + sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc); + + change->data.tp.newtuple = targetRelationNewTuple; + break; + } + + case REORDER_BUFFER_CHANGE_UPDATE: + { + HeapTuple sourceRelationNewTuple = change->data.tp.newtuple; + HeapTuple targetRelationNewTuple = GetTupleForTargetSchema( + sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc); + + change->data.tp.newtuple = targetRelationNewTuple; + + /* + * Format oldtuple according to the target relation. If the column values of replica + * identiy change, then the old tuple is non-null and needs to be formatted according + * to the target relation schema. + */ + if (change->data.tp.oldtuple != NULL) + { + HeapTuple sourceRelationOldTuple = change->data.tp.oldtuple; + HeapTuple targetRelationOldTuple = GetTupleForTargetSchema( + sourceRelationOldTuple, + sourceRelationDesc, + targetRelationDesc); + + change->data.tp.oldtuple = targetRelationOldTuple; + } + break; + } + + case REORDER_BUFFER_CHANGE_DELETE: + { + HeapTuple sourceRelationOldTuple = change->data.tp.oldtuple; + HeapTuple targetRelationOldTuple = GetTupleForTargetSchema( + sourceRelationOldTuple, sourceRelationDesc, targetRelationDesc); + + change->data.tp.oldtuple = targetRelationOldTuple; + break; + } + + /* Only INSERT/DELETE/UPDATE actions are visible in the replication path of split shard */ + default: + ereport(ERROR, errmsg( + "Unexpected Action :%d. Expected action is INSERT/DELETE/UPDATE", + change->action)); + } +#else switch (change->action) { case REORDER_BUFFER_CHANGE_INSERT: @@ -285,6 +381,7 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, "Unexpected Action :%d. 
Expected action is INSERT/DELETE/UPDATE", change->action)); } +#endif } pgOutputPluginChangeCB(ctx, txn, targetRelation, change); @@ -337,7 +434,7 @@ FindTargetRelationOid(Relation sourceShardRelation, shardSplitInfo->distributedTableOid); shardSplitInfo = NULL; - foreach_ptr(shardSplitInfo, entry->shardSplitInfoList) + foreach_declared_ptr(shardSplitInfo, entry->shardSplitInfoList) { if (shardSplitInfo->shardMinValue <= hashValue && shardSplitInfo->shardMaxValue >= hashValue) diff --git a/src/backend/distributed/shardsplit/shardsplit_logical_replication.c b/src/backend/distributed/shardsplit/shardsplit_logical_replication.c index 328dc9af946..a18135372cb 100644 --- a/src/backend/distributed/shardsplit/shardsplit_logical_replication.c +++ b/src/backend/distributed/shardsplit/shardsplit_logical_replication.c @@ -154,7 +154,7 @@ AddPublishableShardEntryInMap(uint32 targetNodeId, ShardInterval *shardInterval, /* Check if parent is already added */ ShardInterval *existingShardInterval = NULL; - foreach_ptr(existingShardInterval, publicationInfo->shardIntervals) + foreach_declared_ptr(existingShardInterval, publicationInfo->shardIntervals) { if (existingShardInterval->shardId == shardInterval->shardId) { @@ -204,7 +204,7 @@ PopulateShardSplitSubscriptionsMetadataList(HTAB *shardSplitInfoHashMap, } List *shardIntervalList = NIL; - foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + foreach_declared_ptr(shardIntervalList, shardGroupSplitIntervalListList) { ShardInterval *shardInterval = NULL; WorkerNode *workerPlacementNode = NULL; @@ -256,7 +256,7 @@ CreateLogicalRepTarget(Oid tableOwnerId, uint32 nodeId, * table owner and node. */ ReplicationSlotInfo *replicationSlot = NULL; - foreach_ptr(replicationSlot, replicationSlotInfoList) + foreach_declared_ptr(replicationSlot, replicationSlotInfoList) { if (nodeId == replicationSlot->targetNodeId && tableOwnerId == replicationSlot->tableOwnerId) diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index bd65fa60c01..16b21e323ea 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -2842,7 +2842,7 @@ ShowShardsForAppNamePrefixesCheckHook(char **newval, void **extra, GucSource sou } char *appNamePrefix = NULL; - foreach_ptr(appNamePrefix, prefixList) + foreach_declared_ptr(appNamePrefix, prefixList) { int prefixLength = strlen(appNamePrefix); if (prefixLength >= NAMEDATALEN) diff --git a/src/backend/distributed/test/colocation_utils.c b/src/backend/distributed/test/colocation_utils.c index 6a87539c46b..d1a738b80ad 100644 --- a/src/backend/distributed/test/colocation_utils.c +++ b/src/backend/distributed/test/colocation_utils.c @@ -93,7 +93,7 @@ get_colocated_table_array(PG_FUNCTION_ARGS) int colocatedTableIndex = 0; Oid colocatedTableId = InvalidOid; - foreach_oid(colocatedTableId, colocatedTableList) + foreach_declared_oid(colocatedTableId, colocatedTableList) { Datum colocatedTableDatum = ObjectIdGetDatum(colocatedTableId); diff --git a/src/backend/distributed/test/create_shards.c b/src/backend/distributed/test/create_shards.c index 4ef13f1cb78..d92a7605932 100644 --- a/src/backend/distributed/test/create_shards.c +++ b/src/backend/distributed/test/create_shards.c @@ -46,7 +46,7 @@ sort_names(PG_FUNCTION_ARGS) StringInfo sortedNames = makeStringInfo(); const char *name = NULL; - foreach_ptr(name, nameList) + foreach_declared_ptr(name, nameList) { appendStringInfo(sortedNames, "%s\n", name); } diff --git 
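/*
 * Why shard_split_change_cb above now carries two nearly identical switch
 * statements: PostgreSQL 17 changed ReorderBufferChange so that
 * change->data.tp.newtuple / oldtuple are plain HeapTuples, whereas earlier
 * versions store a ReorderBufferTupleBuf whose embedded tuple must be taken
 * by address. A minimal sketch of a compat accessor that could hide that
 * difference; ChangeNewTuple() is illustrative only, is not part of this
 * patch, and 170000 stands in for the PG_VERSION_17 constant used above.
 */
#include "postgres.h"

#include "access/htup.h"
#include "replication/reorderbuffer.h"

static inline HeapTuple
ChangeNewTuple(ReorderBufferChange *change)
{
#if PG_VERSION_NUM >= 170000
	/* PG17: the field already is a HeapTuple */
	return change->data.tp.newtuple;
#else
	/* earlier versions: unwrap the ReorderBufferTupleBuf */
	return change->data.tp.newtuple != NULL ?
		   &change->data.tp.newtuple->tuple : NULL;
#endif
}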
a/src/backend/distributed/test/deparse_shard_query.c b/src/backend/distributed/test/deparse_shard_query.c index a9b4ced1da4..1af5945cfd0 100644 --- a/src/backend/distributed/test/deparse_shard_query.c +++ b/src/backend/distributed/test/deparse_shard_query.c @@ -49,14 +49,14 @@ deparse_shard_query_test(PG_FUNCTION_ARGS) List *parseTreeList = pg_parse_query(queryStringChar); Node *parsetree = NULL; - foreach_ptr(parsetree, parseTreeList) + foreach_declared_ptr(parsetree, parseTreeList) { List *queryTreeList = pg_analyze_and_rewrite_fixedparams((RawStmt *) parsetree, queryStringChar, NULL, 0, NULL); Query *query = NULL; - foreach_ptr(query, queryTreeList) + foreach_declared_ptr(query, queryTreeList) { StringInfo buffer = makeStringInfo(); diff --git a/src/backend/distributed/test/dependency.c b/src/backend/distributed/test/dependency.c index 7afbfdec732..25a7ae6e4f7 100644 --- a/src/backend/distributed/test/dependency.c +++ b/src/backend/distributed/test/dependency.c @@ -50,7 +50,7 @@ citus_get_all_dependencies_for_object(PG_FUNCTION_ARGS) List *dependencies = GetAllSupportedDependenciesForObject(&address); ObjectAddress *dependency = NULL; - foreach_ptr(dependency, dependencies) + foreach_declared_ptr(dependency, dependencies) { Datum values[3]; bool isNulls[3]; @@ -95,7 +95,7 @@ citus_get_dependencies_for_object(PG_FUNCTION_ARGS) List *dependencies = GetDependenciesForObject(&address); ObjectAddress *dependency = NULL; - foreach_ptr(dependency, dependencies) + foreach_declared_ptr(dependency, dependencies) { Datum values[3]; bool isNulls[3]; diff --git a/src/backend/distributed/test/distributed_intermediate_results.c b/src/backend/distributed/test/distributed_intermediate_results.c index 843bda476aa..adbcbff89dd 100644 --- a/src/backend/distributed/test/distributed_intermediate_results.c +++ b/src/backend/distributed/test/distributed_intermediate_results.c @@ -90,7 +90,7 @@ partition_task_list_results(PG_FUNCTION_ARGS) Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); DistributedResultFragment *fragment = NULL; - foreach_ptr(fragment, fragmentList) + foreach_declared_ptr(fragment, fragmentList) { bool columnNulls[5] = { 0 }; Datum columnValues[5] = { @@ -169,7 +169,7 @@ redistribute_task_list_results(PG_FUNCTION_ARGS) const char *resultId = NULL; int resultIdIndex = 0; - foreach_ptr(resultId, sortedResultIds) + foreach_declared_ptr(resultId, sortedResultIds) { resultIdValues[resultIdIndex++] = CStringGetTextDatum(resultId); } diff --git a/src/backend/distributed/test/distribution_metadata.c b/src/backend/distributed/test/distribution_metadata.c index 01117922e36..e75d3110bcf 100644 --- a/src/backend/distributed/test/distribution_metadata.c +++ b/src/backend/distributed/test/distribution_metadata.c @@ -74,7 +74,7 @@ load_shard_id_array(PG_FUNCTION_ARGS) Datum *shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardList) + foreach_declared_ptr(shardInterval, shardList) { Datum shardIdDatum = Int64GetDatum(shardInterval->shardId); @@ -144,7 +144,7 @@ load_shard_placement_array(PG_FUNCTION_ARGS) Datum *placementDatumArray = palloc0(placementCount * sizeof(Datum)); ShardPlacement *placement = NULL; - foreach_ptr(placement, placementList) + foreach_declared_ptr(placement, placementList) { appendStringInfo(placementInfo, "%s:%d", placement->nodeName, placement->nodePort); @@ -263,14 +263,14 @@ relation_count_in_query(PG_FUNCTION_ARGS) List *parseTreeList = pg_parse_query(queryStringChar); Node 
*parsetree = NULL; - foreach_ptr(parsetree, parseTreeList) + foreach_declared_ptr(parsetree, parseTreeList) { List *queryTreeList = pg_analyze_and_rewrite_fixedparams((RawStmt *) parsetree, queryStringChar, NULL, 0, NULL); Query *query = NULL; - foreach_ptr(query, queryTreeList) + foreach_declared_ptr(query, queryTreeList) { List *rangeTableList = NIL; diff --git a/src/backend/distributed/test/fake_am.c b/src/backend/distributed/test/fake_am.c index cff124961ac..92805194242 100644 --- a/src/backend/distributed/test/fake_am.c +++ b/src/backend/distributed/test/fake_am.c @@ -372,8 +372,13 @@ fake_vacuum(Relation onerel, VacuumParams *params, static bool -fake_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno, +fake_scan_analyze_next_block(TableScanDesc scan, +#if PG_VERSION_NUM >= PG_VERSION_17 + ReadStream *stream) +#else + BlockNumber blockno, BufferAccessStrategy bstrategy) +#endif { /* we don't support analyze, so return false */ return false; diff --git a/src/backend/distributed/test/fake_fdw.c b/src/backend/distributed/test/fake_fdw.c index 585e61d4108..90b205b1ea1 100644 --- a/src/backend/distributed/test/fake_fdw.c +++ b/src/backend/distributed/test/fake_fdw.c @@ -29,7 +29,7 @@ #include "optimizer/restrictinfo.h" #include "utils/palloc.h" -#include "pg_version_constants.h" +#include "pg_version_compat.h" /* local function forward declarations */ static void FakeGetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, @@ -91,9 +91,11 @@ FakeGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid) Cost startup_cost = 0; Cost total_cost = startup_cost + baserel->rows; - add_path(baserel, (Path *) create_foreignscan_path(root, baserel, NULL, baserel->rows, - startup_cost, total_cost, NIL, - NULL, NULL, NIL)); + add_path(baserel, (Path *) create_foreignscan_path_compat(root, baserel, NULL, + baserel->rows, + startup_cost, total_cost, + NIL, + NULL, NULL, NIL, NIL)); } diff --git a/src/backend/distributed/test/foreign_key_relationship_query.c b/src/backend/distributed/test/foreign_key_relationship_query.c index af187111a70..8f96f5e3a82 100644 --- a/src/backend/distributed/test/foreign_key_relationship_query.c +++ b/src/backend/distributed/test/foreign_key_relationship_query.c @@ -205,7 +205,7 @@ get_foreign_key_connected_relations(PG_FUNCTION_ARGS) Oid connectedRelationId; List *fkeyConnectedRelationIdList = GetForeignKeyConnectedRelationIdList(relationId); - foreach_oid(connectedRelationId, fkeyConnectedRelationIdList) + foreach_declared_oid(connectedRelationId, fkeyConnectedRelationIdList) { Datum values[GET_FKEY_CONNECTED_RELATIONS_COLUMNS]; bool nulls[GET_FKEY_CONNECTED_RELATIONS_COLUMNS]; diff --git a/src/backend/distributed/test/metadata_sync.c b/src/backend/distributed/test/metadata_sync.c index ce025cff9bd..449ae31e84f 100644 --- a/src/backend/distributed/test/metadata_sync.c +++ b/src/backend/distributed/test/metadata_sync.c @@ -80,7 +80,7 @@ activate_node_snapshot(PG_FUNCTION_ARGS) sizeof(Datum)); const char *activateNodeSnapshotCommand = NULL; - foreach_ptr(activateNodeSnapshotCommand, activateNodeCommandList) + foreach_declared_ptr(activateNodeSnapshotCommand, activateNodeCommandList) { Datum activateNodeSnapshotCommandDatum = CStringGetTextDatum( activateNodeSnapshotCommand); @@ -109,7 +109,7 @@ IsMetadataSynced(void) List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerList) + foreach_declared_ptr(workerNode, workerList) { if (workerNode->hasMetadata && 
!workerNode->metadataSynced) { diff --git a/src/backend/distributed/test/partitioning_utils.c b/src/backend/distributed/test/partitioning_utils.c index be916356145..f1e186bada9 100644 --- a/src/backend/distributed/test/partitioning_utils.c +++ b/src/backend/distributed/test/partitioning_utils.c @@ -85,7 +85,7 @@ print_partitions(PG_FUNCTION_ARGS) partitionList = SortList(partitionList, CompareOids); Oid partitionOid = InvalidOid; - foreach_oid(partitionOid, partitionList) + foreach_declared_oid(partitionOid, partitionList) { /* at least one table is already added, add comma */ if (resultRelationNames->len > 0) diff --git a/src/backend/distributed/test/progress_utils.c b/src/backend/distributed/test/progress_utils.c index e1ea09e3d2f..7c335ce8ae6 100644 --- a/src/backend/distributed/test/progress_utils.c +++ b/src/backend/distributed/test/progress_utils.c @@ -95,7 +95,7 @@ show_progress(PG_FUNCTION_ARGS) Tuplestorestate *tupstore = SetupTuplestore(fcinfo, &tupdesc); ProgressMonitorData *monitor = NULL; - foreach_ptr(monitor, monitorList) + foreach_declared_ptr(monitor, monitorList) { uint64 *steps = ProgressMonitorSteps(monitor); diff --git a/src/backend/distributed/test/prune_shard_list.c b/src/backend/distributed/test/prune_shard_list.c index f972281ecc6..f5bb9c97995 100644 --- a/src/backend/distributed/test/prune_shard_list.c +++ b/src/backend/distributed/test/prune_shard_list.c @@ -224,7 +224,7 @@ PrunedShardIdsForTable(Oid distributedTableId, List *whereClauseList) Datum *shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardList) + foreach_declared_ptr(shardInterval, shardList) { Datum shardIdDatum = Int64GetDatum(shardInterval->shardId); diff --git a/src/backend/distributed/test/shard_rebalancer.c b/src/backend/distributed/test/shard_rebalancer.c index 32bfd9f463c..1b79fc27af6 100644 --- a/src/backend/distributed/test/shard_rebalancer.c +++ b/src/backend/distributed/test/shard_rebalancer.c @@ -128,13 +128,13 @@ shard_placement_rebalance_array(PG_FUNCTION_ARGS) pfree(shardPlacementJsonArray); /* map workerTestInfoList to a list of its WorkerNodes */ - foreach_ptr(workerTestInfo, context.workerTestInfoList) + foreach_declared_ptr(workerTestInfo, context.workerTestInfoList) { workerNodeList = lappend(workerNodeList, workerTestInfo->node); } /* map shardPlacementTestInfoList to a list of list of its ShardPlacements */ - foreach_ptr(shardPlacementTestInfo, context.shardPlacementTestInfoList) + foreach_declared_ptr(shardPlacementTestInfo, context.shardPlacementTestInfoList) { if (shardPlacementTestInfo->nextColocationGroup) { @@ -197,7 +197,7 @@ ShardAllowedOnNode(uint64 shardId, WorkerNode *workerNode, void *voidContext) RebalancePlacementContext *context = voidContext; WorkerTestInfo *workerTestInfo = NULL; uint64 *disallowedShardIdPtr = NULL; - foreach_ptr(workerTestInfo, context->workerTestInfoList) + foreach_declared_ptr(workerTestInfo, context->workerTestInfoList) { if (workerTestInfo->node == workerNode) { @@ -206,7 +206,7 @@ ShardAllowedOnNode(uint64 shardId, WorkerNode *workerNode, void *voidContext) } Assert(workerTestInfo != NULL); - foreach_ptr(disallowedShardIdPtr, workerTestInfo->disallowedShardIds) + foreach_declared_ptr(disallowedShardIdPtr, workerTestInfo->disallowedShardIds) { if (shardId == *disallowedShardIdPtr) { @@ -226,7 +226,7 @@ NodeCapacity(WorkerNode *workerNode, void *voidContext) { RebalancePlacementContext *context = voidContext; WorkerTestInfo *workerTestInfo = NULL; - 
foreach_ptr(workerTestInfo, context->workerTestInfoList) + foreach_declared_ptr(workerTestInfo, context->workerTestInfoList) { if (workerTestInfo->node == workerNode) { @@ -251,7 +251,7 @@ GetShardCost(uint64 shardId, void *voidContext) shardCost.shardId = shardId; ShardPlacementTestInfo *shardPlacementTestInfo = NULL; - foreach_ptr(shardPlacementTestInfo, context->shardPlacementTestInfoList) + foreach_declared_ptr(shardPlacementTestInfo, context->shardPlacementTestInfoList) { if (shardPlacementTestInfo->placement->shardId == shardId) { @@ -300,12 +300,12 @@ shard_placement_replication_array(PG_FUNCTION_ARGS) pfree(workerNodeJsonArray); pfree(shardPlacementJsonArray); - foreach_ptr(workerTestInfo, workerTestInfoList) + foreach_declared_ptr(workerTestInfo, workerTestInfoList) { workerNodeList = lappend(workerNodeList, workerTestInfo->node); } - foreach_ptr(shardPlacementTestInfo, shardPlacementTestInfoList) + foreach_declared_ptr(shardPlacementTestInfo, shardPlacementTestInfoList) { shardPlacementList = lappend(shardPlacementList, shardPlacementTestInfo->placement); diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index 67acadd2940..e2afd18f74a 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -33,7 +33,7 @@ #include "storage/spin.h" #include "utils/timestamp.h" -#include "pg_version_constants.h" +#include "pg_version_compat.h" #include "distributed/backend_data.h" #include "distributed/connection_management.h" @@ -267,7 +267,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS) /* open connections in parallel */ WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { const char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; @@ -289,7 +289,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS) /* send commands in parallel */ MultiConnection *connection = NULL; - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { int querySent = SendRemoteCommand(connection, queryToSend->data); if (querySent == 0) @@ -299,7 +299,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS) } /* receive query results */ - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { bool raiseInterrupts = true; Datum values[ACTIVE_TRANSACTION_COLUMN_COUNT]; @@ -700,7 +700,7 @@ InitializeBackendData(const char *applicationName) uint64 gpid = ExtractGlobalPID(applicationName); - MyBackendData = &backendManagementShmemData->backends[MyProc->pgprocno]; + MyBackendData = &backendManagementShmemData->backends[getProcNo_compat(MyProc)]; Assert(MyBackendData); @@ -1174,11 +1174,11 @@ CurrentDistributedTransactionNumber(void) void GetBackendDataForProc(PGPROC *proc, BackendData *result) { - int pgprocno = proc->pgprocno; + int pgprocno = getProcNo_compat(proc); if (proc->lockGroupLeader != NULL) { - pgprocno = proc->lockGroupLeader->pgprocno; + pgprocno = getProcNo_compat(proc->lockGroupLeader); } BackendData *backendData = &backendManagementShmemData->backends[pgprocno]; @@ -1198,7 +1198,8 @@ GetBackendDataForProc(PGPROC *proc, BackendData *result) void CancelTransactionDueToDeadlock(PGPROC *proc) { - BackendData *backendData = &backendManagementShmemData->backends[proc->pgprocno]; + BackendData *backendData = &backendManagementShmemData->backends[getProcNo_compat( + proc)]; /* backend might not have used citus yet and 
thus not initialized backend data */ if (!backendData) @@ -1330,7 +1331,7 @@ ActiveDistributedTransactionNumbers(void) LocalTransactionId GetMyProcLocalTransactionId(void) { - return MyProc->lxid; + return getLxid_compat(MyProc); } diff --git a/src/backend/distributed/transaction/distributed_deadlock_detection.c b/src/backend/distributed/transaction/distributed_deadlock_detection.c index 5e8060a4f63..30b42302875 100644 --- a/src/backend/distributed/transaction/distributed_deadlock_detection.c +++ b/src/backend/distributed/transaction/distributed_deadlock_detection.c @@ -177,7 +177,7 @@ CheckForDistributedDeadlocks(void) * this node. */ TransactionNode *currentNode = NULL; - foreach_ptr(currentNode, deadlockPath) + foreach_declared_ptr(currentNode, deadlockPath) { bool transactionAssociatedWithProc = AssociateDistributedTransactionWithBackendProc(currentNode); @@ -305,7 +305,7 @@ PrependOutgoingNodesToQueue(TransactionNode *transactionNode, int currentStackDe /* prepend to the list to continue depth-first search */ TransactionNode *waitForTransaction = NULL; - foreach_ptr(waitForTransaction, transactionNode->waitsFor) + foreach_declared_ptr(waitForTransaction, transactionNode->waitsFor) { QueuedTransactionNode *queuedNode = palloc0(sizeof(QueuedTransactionNode)); @@ -672,7 +672,7 @@ WaitsForToString(List *waitsFor) StringInfo transactionIdStr = makeStringInfo(); TransactionNode *waitingNode = NULL; - foreach_ptr(waitingNode, waitsFor) + foreach_declared_ptr(waitingNode, waitsFor) { if (transactionIdStr->len != 0) { diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c index 695df2bf4c2..11982ec5a30 100644 --- a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -23,6 +23,8 @@ #include "utils/hsearch.h" #include "utils/timestamp.h" +#include "pg_version_compat.h" + #include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/hash_helpers.h" @@ -149,7 +151,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) /* open connections in parallel */ WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { const char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; @@ -172,7 +174,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) /* send commands in parallel */ MultiConnection *connection = NULL; - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { StringInfo queryString = makeStringInfo(); @@ -203,7 +205,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) } /* receive dump_local_wait_edges results */ - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { bool raiseInterrupts = true; @@ -993,7 +995,7 @@ AllocWaitEdge(WaitGraph *waitGraph) static void AddProcToVisit(PROCStack *remaining, PGPROC *proc) { - if (remaining->procAdded[proc->pgprocno]) + if (remaining->procAdded[getProcNo_compat(proc)]) { return; } @@ -1001,7 +1003,7 @@ AddProcToVisit(PROCStack *remaining, PGPROC *proc) Assert(remaining->procCount < TotalProcCount()); remaining->procs[remaining->procCount++] = proc; - remaining->procAdded[proc->pgprocno] = true; + remaining->procAdded[getProcNo_compat(proc)] = true; } diff --git a/src/backend/distributed/transaction/relation_access_tracking.c b/src/backend/distributed/transaction/relation_access_tracking.c index 5044941c471..0ffa68d9566 100644 --- 
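/*
 * The getProcNo_compat()/getLxid_compat() calls introduced above presumably
 * live in pg_version_compat.h (note the include swap from
 * pg_version_constants.h in these files) and paper over PostgreSQL 17's
 * rework of PGPROC, where the process number and local transaction id moved
 * under the vxid member. A sketch of the kind of shim this implies; the PG17
 * field names are an assumption based on proc.h and should be verified
 * against the real compat header rather than taken as the patch's definition.
 */
#include "postgres.h"

#include "storage/proc.h"

#if PG_VERSION_NUM >= 170000
#define getProcNo_compat(proc) ((proc)->vxid.procNumber)
#define getLxid_compat(proc) ((proc)->vxid.lxid)
#else
#define getProcNo_compat(proc) ((proc)->pgprocno)
#define getLxid_compat(proc) ((proc)->lxid)
#endif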
a/src/backend/distributed/transaction/relation_access_tracking.c +++ b/src/backend/distributed/transaction/relation_access_tracking.c @@ -367,7 +367,7 @@ RecordRelationParallelSelectAccessForTask(Task *task) List *relationShardList = task->relationShardList; RelationShard *relationShard = NULL; - foreach_ptr(relationShard, relationShardList) + foreach_declared_ptr(relationShard, relationShardList) { Oid currentRelationId = relationShard->relationId; @@ -412,7 +412,7 @@ RecordRelationParallelModifyAccessForTask(Task *task) { relationShardList = task->relationShardList; RelationShard *relationShard = NULL; - foreach_ptr(relationShard, relationShardList) + foreach_declared_ptr(relationShard, relationShardList) { Oid currentRelationId = relationShard->relationId; @@ -446,7 +446,7 @@ RecordRelationParallelDDLAccessForTask(Task *task) Oid lastRelationId = InvalidOid; RelationShard *relationShard = NULL; - foreach_ptr(relationShard, relationShardList) + foreach_declared_ptr(relationShard, relationShardList) { Oid currentRelationId = relationShard->relationId; @@ -534,7 +534,7 @@ RecordParallelRelationAccess(Oid relationId, ShardPlacementAccessType placementA List *partitionList = PartitionList(relationId); Oid partitionOid = InvalidOid; - foreach_oid(partitionOid, partitionList) + foreach_declared_oid(partitionOid, partitionList) { /* recursively record all relation accesses of its partitions */ RecordParallelRelationAccess(partitionOid, placementAccess); @@ -926,7 +926,7 @@ HoldsConflictingLockWithReferencedRelations(Oid relationId, ShardPlacementAccess CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId); Oid referencedRelation = InvalidOid; - foreach_oid(referencedRelation, cacheEntry->referencedRelationsViaForeignKey) + foreach_declared_oid(referencedRelation, cacheEntry->referencedRelationsViaForeignKey) { /* * We're only interested in foreign keys to reference tables and citus @@ -997,7 +997,8 @@ HoldsConflictingLockWithReferencingRelations(Oid relationId, ShardPlacementAcces Assert(!IsCitusTableTypeCacheEntry(cacheEntry, DISTRIBUTED_TABLE)); Oid referencingRelation = InvalidOid; - foreach_oid(referencingRelation, cacheEntry->referencingRelationsViaForeignKey) + foreach_declared_oid(referencingRelation, + cacheEntry->referencingRelationsViaForeignKey) { /* * We're only interested in foreign keys to reference tables from diff --git a/src/backend/distributed/transaction/remote_transaction.c b/src/backend/distributed/transaction/remote_transaction.c index 4c26e2478ca..9ef7595168d 100644 --- a/src/backend/distributed/transaction/remote_transaction.c +++ b/src/backend/distributed/transaction/remote_transaction.c @@ -266,7 +266,7 @@ StartRemoteTransactionBegin(struct MultiConnection *connection) transaction->lastQueuedSubXact = TopSubTransactionId; SubXactContext *subXactState = NULL; - foreach_ptr(subXactState, activeSubXacts) + foreach_declared_ptr(subXactState, activeSubXacts) { /* append SET LOCAL state from when SAVEPOINT was encountered... 
*/ if (subXactState->setLocalCmds != NULL) @@ -477,13 +477,13 @@ RemoteTransactionListBegin(List *connectionList) MultiConnection *connection = NULL; /* send BEGIN to all nodes */ - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { StartRemoteTransactionBegin(connection); } /* wait for BEGIN to finish on all nodes */ - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { FinishRemoteTransactionBegin(connection); } @@ -890,7 +890,7 @@ RemoteTransactionsBeginIfNecessary(List *connectionList) } /* issue BEGIN to all connections needing it */ - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { RemoteTransaction *transaction = &connection->remoteTransaction; @@ -914,7 +914,7 @@ RemoteTransactionsBeginIfNecessary(List *connectionList) WaitForAllConnections(connectionList, raiseInterrupts); /* get result of all the BEGINs */ - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { RemoteTransaction *transaction = &connection->remoteTransaction; diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 9c7b456807e..d126577729d 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -1150,7 +1150,7 @@ TrackPropagatedTableAndSequences(Oid relationId) /* track its sequences */ List *ownedSeqIdList = getOwnedSequences(relationId); Oid ownedSeqId = InvalidOid; - foreach_oid(ownedSeqId, ownedSeqIdList) + foreach_declared_oid(ownedSeqId, ownedSeqIdList) { ObjectAddress *seqAddress = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*seqAddress, RelationRelationId, ownedSeqId); @@ -1178,7 +1178,7 @@ bool HasAnyObjectInPropagatedObjects(List *objectList) { ObjectAddress *object = NULL; - foreach_ptr(object, objectList) + foreach_declared_ptr(object, objectList) { /* first search in root transaction */ if (DependencyInPropagatedObjectsHash(PropagatedObjectsInTx, object)) @@ -1192,7 +1192,7 @@ HasAnyObjectInPropagatedObjects(List *objectList) continue; } SubXactContext *state = NULL; - foreach_ptr(state, activeSubXactContexts) + foreach_declared_ptr(state, activeSubXactContexts) { if (DependencyInPropagatedObjectsHash(state->propagatedObjects, object)) { diff --git a/src/backend/distributed/transaction/transaction_recovery.c b/src/backend/distributed/transaction/transaction_recovery.c index c31dc85a2a9..0eede84caf6 100644 --- a/src/backend/distributed/transaction/transaction_recovery.c +++ b/src/backend/distributed/transaction/transaction_recovery.c @@ -128,7 +128,7 @@ RecoverTwoPhaseCommits(void) List *workerList = ActivePrimaryNodeList(NoLock); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerList) + foreach_declared_ptr(workerNode, workerList) { recoveredTransactionCount += RecoverWorkerTransactions(workerNode); } diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index c6fcee107d6..08781105380 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -74,7 +74,7 @@ SendCommandToWorkersAsUser(TargetWorkerSet targetWorkerSet, const char *nodeUser /* run commands serially */ WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { const char 
*nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; @@ -147,7 +147,7 @@ void SendCommandListToWorkersWithMetadata(List *commands) { char *command = NULL; - foreach_ptr(command, commands) + foreach_declared_ptr(command, commands) { SendCommandToWorkersWithMetadata(command); } @@ -193,7 +193,7 @@ void SendCommandListToRemoteNodesWithMetadata(List *commands) { char *command = NULL; - foreach_ptr(command, commands) + foreach_declared_ptr(command, commands) { SendCommandToRemoteNodesWithMetadata(command); } @@ -253,7 +253,7 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) List *result = NIL; WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || targetWorkerSet == REMOTE_METADATA_NODES || @@ -314,7 +314,7 @@ SendBareCommandListToMetadataNodesInternal(List *commandList, /* run commands serially */ WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { const char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; @@ -327,7 +327,7 @@ SendBareCommandListToMetadataNodesInternal(List *commandList, /* iterate over the commands and execute them in the same connection */ const char *commandString = NULL; - foreach_ptr(commandString, commandList) + foreach_declared_ptr(commandString, commandList) { ExecuteCriticalRemoteCommand(workerConnection, commandString); } @@ -380,7 +380,7 @@ SendCommandToWorkersParamsInternal(TargetWorkerSet targetWorkerSet, const char * /* open connections in parallel */ WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { const char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; @@ -408,7 +408,7 @@ SendCommandToWorkersParamsInternal(TargetWorkerSet targetWorkerSet, const char * /* send commands in parallel */ MultiConnection *connection = NULL; - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { int querySent = SendRemoteCommandParams(connection, command, parameterCount, parameterTypes, parameterValues, false); @@ -419,7 +419,7 @@ SendCommandToWorkersParamsInternal(TargetWorkerSet targetWorkerSet, const char * } /* get results */ - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { PGresult *result = GetRemoteCommandResult(connection, true); if (!IsResponseOK(result)) @@ -490,7 +490,7 @@ SendCommandListToWorkerOutsideTransactionWithConnection(MultiConnection *workerC /* iterate over the commands and execute them in the same connection */ const char *commandString = NULL; - foreach_ptr(commandString, commandList) + foreach_declared_ptr(commandString, commandList) { ExecuteCriticalRemoteCommand(workerConnection, commandString); } @@ -531,7 +531,7 @@ SendCommandListToWorkerListWithBareConnections(List *workerConnectionList, /* send commands in parallel */ MultiConnection *connection = NULL; - foreach_ptr(connection, workerConnectionList) + foreach_declared_ptr(connection, workerConnectionList) { int querySent = SendRemoteCommand(connection, stringToSend); if (querySent == 0) @@ -541,7 +541,7 @@ SendCommandListToWorkerListWithBareConnections(List *workerConnectionList, } bool failOnError = true; - foreach_ptr(connection, workerConnectionList) + foreach_declared_ptr(connection, workerConnectionList) { ClearResults(connection, 
failOnError); } @@ -571,7 +571,7 @@ SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *workerNodeList List *connectionList = NIL; WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { const char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; @@ -608,7 +608,7 @@ SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *workerNodeList /* send commands in parallel */ bool failOnError = true; MultiConnection *connection = NULL; - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { int querySent = SendRemoteCommand(connection, stringToSend); if (querySent == 0) @@ -617,7 +617,7 @@ SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *workerNodeList } } - foreach_ptr(connection, connectionList) + foreach_declared_ptr(connection, connectionList) { ClearResults(connection, failOnError); } @@ -646,7 +646,7 @@ SendOptionalCommandListToWorkerOutsideTransactionWithConnection( /* iterate over the commands and execute them in the same connection */ bool failed = false; const char *commandString = NULL; - foreach_ptr(commandString, commandList) + foreach_declared_ptr(commandString, commandList) { if (ExecuteOptionalRemoteCommand(workerConnection, commandString, NULL) != 0) { @@ -722,7 +722,7 @@ SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(const char *node /* iterate over the commands and execute them in the same connection */ const char *commandString = NULL; - foreach_ptr(commandString, commandList) + foreach_declared_ptr(commandString, commandList) { if (ExecuteOptionalRemoteCommand(workerConnection, commandString, NULL) != RESPONSE_OKAY) @@ -757,7 +757,7 @@ static void ErrorIfAnyMetadataNodeOutOfSync(List *metadataNodeList) { WorkerNode *metadataNode = NULL; - foreach_ptr(metadataNode, metadataNodeList) + foreach_declared_ptr(metadataNode, metadataNodeList) { Assert(metadataNode->hasMetadata); diff --git a/src/backend/distributed/utils/background_jobs.c b/src/backend/distributed/utils/background_jobs.c index a7a124c7487..73a635f21f7 100644 --- a/src/backend/distributed/utils/background_jobs.c +++ b/src/backend/distributed/utils/background_jobs.c @@ -158,7 +158,7 @@ citus_job_cancel(PG_FUNCTION_ARGS) /* send cancellation to any running backends */ int pid = 0; - foreach_int(pid, pids) + foreach_declared_int(pid, pids) { Datum pidDatum = Int32GetDatum(pid); Datum signalSuccessDatum = DirectFunctionCall1(pg_cancel_backend, pidDatum); @@ -891,7 +891,7 @@ IncrementParallelTaskCountForNodesInvolved(BackgroundTask *task) int node; /* first check whether we have reached the limit for any of the nodes */ - foreach_int(node, task->nodesInvolved) + foreach_declared_int(node, task->nodesInvolved) { bool found; ParallelTasksPerNodeEntry *hashEntry = hash_search( @@ -908,7 +908,7 @@ IncrementParallelTaskCountForNodesInvolved(BackgroundTask *task) } /* then, increment the parallel task count per each node */ - foreach_int(node, task->nodesInvolved) + foreach_declared_int(node, task->nodesInvolved) { ParallelTasksPerNodeEntry *hashEntry = hash_search( ParallelTasksPerNode, &(node), HASH_FIND, NULL); @@ -934,7 +934,7 @@ DecrementParallelTaskCountForNodesInvolved(BackgroundTask *task) if (task->nodesInvolved) { int node; - foreach_int(node, task->nodesInvolved) + foreach_declared_int(node, task->nodesInvolved) { ParallelTasksPerNodeEntry *hashEntry = hash_search(ParallelTasksPerNode, &(node), @@ -1278,7 +1278,7 @@ 
CitusBackgroundTaskQueueMonitorMain(Datum arg) /* iterate over all handle entries and monitor each task's output */ BackgroundExecutorHashEntry *handleEntry = NULL; - foreach_ptr(handleEntry, runningTaskEntries) + foreach_declared_ptr(handleEntry, runningTaskEntries) { /* create task execution context and assign it to queueMonitorExecutionContext */ TaskExecutionContext taskExecutionContext = { @@ -1916,7 +1916,7 @@ ExecuteSqlString(const char *sql) * analysis on the next one, since there may be interdependencies. */ RawStmt *parsetree = NULL; - foreach_ptr(parsetree, raw_parsetree_list) + foreach_declared_ptr(parsetree, raw_parsetree_list) { /* * We don't allow transaction-control commands like COMMIT and ABORT diff --git a/src/backend/distributed/utils/citus_copyfuncs.c b/src/backend/distributed/utils/citus_copyfuncs.c index e283a3034c2..4b4a334c832 100644 --- a/src/backend/distributed/utils/citus_copyfuncs.c +++ b/src/backend/distributed/utils/citus_copyfuncs.c @@ -78,7 +78,7 @@ CitusSetTag(Node *node, int tag) do { \ char *curString = NULL; \ List *newList = NIL; \ - foreach_ptr(curString, from->fldname) { \ + foreach_declared_ptr(curString, from->fldname) { \ char *newString = curString ? pstrdup(curString) : (char *) NULL; \ newList = lappend(newList, newString); \ } \ diff --git a/src/backend/distributed/utils/citus_depended_object.c b/src/backend/distributed/utils/citus_depended_object.c index 7588f85949c..3babf76f088 100644 --- a/src/backend/distributed/utils/citus_depended_object.c +++ b/src/backend/distributed/utils/citus_depended_object.c @@ -138,7 +138,7 @@ HideCitusDependentObjectsOnQueriesOfPgMetaTables(Node *node, void *context) int varno = 0; RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, query->rtable) + foreach_declared_ptr(rangeTableEntry, query->rtable) { varno++; @@ -376,7 +376,7 @@ DistOpsValidityState(Node *node, const DistributeObjectOps *ops) bool isPostprocess = false; List *objectAddresses = ops->address(node, missingOk, isPostprocess); ObjectAddress *objectAddress = NULL; - foreach_ptr(objectAddress, objectAddresses) + foreach_declared_ptr(objectAddress, objectAddresses) { if (OidIsValid(objectAddress->objectId)) { @@ -478,7 +478,7 @@ AnyObjectViolatesOwnership(DropStmt *dropStmt) PG_TRY(); { Node *object = NULL; - foreach_ptr(object, dropStmt->objects) + foreach_declared_ptr(object, dropStmt->objects) { Relation rel = NULL; objectAddress = get_object_address(objectType, object, diff --git a/src/backend/distributed/utils/colocation_utils.c b/src/backend/distributed/utils/colocation_utils.c index c189195271d..e2af11a1de5 100644 --- a/src/backend/distributed/utils/colocation_utils.c +++ b/src/backend/distributed/utils/colocation_utils.c @@ -204,7 +204,7 @@ get_colocated_shard_array(PG_FUNCTION_ARGS) int colocatedShardIndex = 0; ShardInterval *colocatedShardInterval = NULL; - foreach_ptr(colocatedShardInterval, colocatedShardList) + foreach_declared_ptr(colocatedShardInterval, colocatedShardList) { uint64 colocatedShardId = colocatedShardInterval->shardId; @@ -1063,7 +1063,7 @@ ColocatedShardIntervalList(ShardInterval *shardInterval) Assert(shardIntervalIndex >= 0); Oid colocatedTableId = InvalidOid; - foreach_oid(colocatedTableId, colocatedTableList) + foreach_declared_oid(colocatedTableId, colocatedTableList) { CitusTableCacheEntry *colocatedTableCacheEntry = GetCitusTableCacheEntry(colocatedTableId); @@ -1129,7 +1129,7 @@ ColocatedNonPartitionShardIntervalList(ShardInterval *shardInterval) Assert(shardIntervalIndex >= 0); Oid 
colocatedTableId = InvalidOid; - foreach_oid(colocatedTableId, colocatedTableList) + foreach_declared_oid(colocatedTableId, colocatedTableList) { if (PartitionTable(colocatedTableId)) { diff --git a/src/backend/distributed/utils/distribution_column_map.c b/src/backend/distributed/utils/distribution_column_map.c index 43f9939b1d2..380a5f98f30 100644 --- a/src/backend/distributed/utils/distribution_column_map.c +++ b/src/backend/distributed/utils/distribution_column_map.c @@ -81,7 +81,7 @@ AddDistributionColumnForRelation(DistributionColumnMap *distributionColumnMap, List *partitionList = PartitionList(relationId); Oid partitionRelationId = InvalidOid; - foreach_oid(partitionRelationId, partitionList) + foreach_declared_oid(partitionRelationId, partitionList) { AddDistributionColumnForRelation(distributionColumnMap, partitionRelationId, distributionColumnName); diff --git a/src/backend/distributed/utils/foreign_key_relationship.c b/src/backend/distributed/utils/foreign_key_relationship.c index 1abb7ae0717..0025becb448 100644 --- a/src/backend/distributed/utils/foreign_key_relationship.c +++ b/src/backend/distributed/utils/foreign_key_relationship.c @@ -190,7 +190,7 @@ GetRelationshipNodesForFKeyConnectedRelations( { List *allNeighboursList = GetAllNeighboursList(currentNode); ForeignConstraintRelationshipNode *neighbourNode = NULL; - foreach_ptr(neighbourNode, allNeighboursList) + foreach_declared_ptr(neighbourNode, allNeighboursList) { Oid neighbourRelationId = neighbourNode->relationId; if (OidVisited(oidVisitedMap, neighbourRelationId)) @@ -437,7 +437,7 @@ GetConnectedListHelper(ForeignConstraintRelationshipNode *node, bool isReferenci List *neighbourList = GetNeighbourList(currentNode, isReferencing); ForeignConstraintRelationshipNode *neighbourNode = NULL; - foreach_ptr(neighbourNode, neighbourList) + foreach_declared_ptr(neighbourNode, neighbourList) { Oid neighbourRelationId = neighbourNode->relationId; if (!OidVisited(oidVisitedMap, neighbourRelationId)) @@ -508,7 +508,7 @@ GetRelationIdsFromRelationshipNodeList(List *fKeyRelationshipNodeList) List *relationIdList = NIL; ForeignConstraintRelationshipNode *fKeyRelationshipNode = NULL; - foreach_ptr(fKeyRelationshipNode, fKeyRelationshipNodeList) + foreach_declared_ptr(fKeyRelationshipNode, fKeyRelationshipNodeList) { Oid relationId = fKeyRelationshipNode->relationId; relationIdList = lappend_oid(relationIdList, relationId); @@ -561,7 +561,7 @@ PopulateAdjacencyLists(void) frelEdgeList = SortList(frelEdgeList, CompareForeignConstraintRelationshipEdges); ForeignConstraintRelationshipEdge *currentFConstraintRelationshipEdge = NULL; - foreach_ptr(currentFConstraintRelationshipEdge, frelEdgeList) + foreach_declared_ptr(currentFConstraintRelationshipEdge, frelEdgeList) { /* we just saw this edge, no need to add it twice */ if (currentFConstraintRelationshipEdge->referencingRelationOID == diff --git a/src/backend/distributed/utils/listutils.c b/src/backend/distributed/utils/listutils.c index eddef1fea09..6f3c73e55cf 100644 --- a/src/backend/distributed/utils/listutils.c +++ b/src/backend/distributed/utils/listutils.c @@ -43,7 +43,7 @@ SortList(List *pointerList, int (*comparisonFunction)(const void *, const void * void **array = (void **) palloc0(arraySize * sizeof(void *)); void *pointer = NULL; - foreach_ptr(pointer, pointerList) + foreach_declared_ptr(pointer, pointerList) { array[arrayIndex] = pointer; @@ -82,7 +82,7 @@ PointerArrayFromList(List *pointerList) int pointerIndex = 0; void *pointer = NULL; - foreach_ptr(pointer, pointerList) 
+ foreach_declared_ptr(pointer, pointerList) { pointerArray[pointerIndex] = pointer; pointerIndex += 1; @@ -130,7 +130,7 @@ ListToHashSet(List *itemList, Size keySize, bool isStringList) HTAB *itemSet = hash_create("ListToHashSet", capacity, &info, flags); void *item = NULL; - foreach_ptr(item, itemList) + foreach_declared_ptr(item, itemList) { bool foundInSet = false; @@ -188,7 +188,7 @@ StringJoinParams(List *stringList, char delimiter, char *prefix, char *postfix) const char *command = NULL; int curIndex = 0; - foreach_ptr(command, stringList) + foreach_declared_ptr(command, stringList) { if (curIndex > 0) { @@ -219,7 +219,7 @@ ListTake(List *pointerList, int size) int listIndex = 0; void *pointer = NULL; - foreach_ptr(pointer, pointerList) + foreach_declared_ptr(pointer, pointerList) { result = lappend(result, pointer); listIndex++; @@ -279,7 +279,7 @@ list_filter_oid(List *list, bool (*keepElement)(Oid element)) { List *result = NIL; Oid element = InvalidOid; - foreach_oid(element, list) + foreach_declared_oid(element, list) { if (keepElement(element)) { diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index ede2008cabc..063465beb7c 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -259,7 +259,7 @@ worker_fix_partition_shard_index_names(PG_FUNCTION_ARGS) List *partitionShardIndexIds = find_inheritance_children(parentShardIndexId, ShareRowExclusiveLock); Oid partitionShardIndexId = InvalidOid; - foreach_oid(partitionShardIndexId, partitionShardIndexIds) + foreach_declared_oid(partitionShardIndexId, partitionShardIndexIds) { if (IndexGetRelation(partitionShardIndexId, false) == partitionShardId) { @@ -372,7 +372,7 @@ CreateFixPartitionConstraintsTaskList(Oid relationId) LockShardListMetadata(shardIntervalList, ShareLock); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { uint64 shardId = shardInterval->shardId; @@ -458,7 +458,7 @@ WorkerFixPartitionConstraintCommandList(Oid relationId, uint64 shardId, char *quotedShardName = quote_qualified_identifier(schemaName, shardRelationName); char *constraintName = NULL; - foreach_ptr(constraintName, checkConstraintList) + foreach_declared_ptr(constraintName, checkConstraintList) { StringInfo shardQueryString = makeStringInfo(); appendStringInfo(shardQueryString, @@ -543,7 +543,7 @@ CreateFixPartitionShardIndexNames(Oid parentRelationId, Oid partitionRelationId, else { Oid partitionId = InvalidOid; - foreach_oid(partitionId, partitionList) + foreach_declared_oid(partitionId, partitionList) { List *partitionShardIntervalList = LoadShardIntervalList(partitionId); LockShardListMetadata(partitionShardIntervalList, ShareLock); @@ -563,7 +563,7 @@ CreateFixPartitionShardIndexNames(Oid parentRelationId, Oid partitionRelationId, int taskId = 1; ShardInterval *parentShardInterval = NULL; - foreach_ptr(parentShardInterval, parentShardIntervalList) + foreach_declared_ptr(parentShardInterval, parentShardIntervalList) { uint64 parentShardId = parentShardInterval->shardId; @@ -615,7 +615,7 @@ WorkerFixPartitionShardIndexNamesCommandList(uint64 parentShardId, { List *commandList = NIL; Oid parentIndexId = InvalidOid; - foreach_oid(parentIndexId, parentIndexIdList) + foreach_declared_oid(parentIndexId, parentIndexIdList) { if (!has_subclass(parentIndexId)) { @@ -666,7 +666,7 @@ 
WorkerFixPartitionShardIndexNamesCommandListForParentShardIndex( bool addAllPartitions = (partitionRelationId == InvalidOid); Oid partitionIndexId = InvalidOid; - foreach_oid(partitionIndexId, partitionIndexIds) + foreach_declared_oid(partitionIndexId, partitionIndexIds) { Oid partitionId = IndexGetRelation(partitionIndexId, false); if (addAllPartitions || partitionId == partitionRelationId) @@ -701,7 +701,7 @@ WorkerFixPartitionShardIndexNamesCommandListForPartitionIndex(Oid partitionIndex List *partitionShardIntervalList = LoadShardIntervalList(partitionId); ShardInterval *partitionShardInterval = NULL; - foreach_ptr(partitionShardInterval, partitionShardIntervalList) + foreach_declared_ptr(partitionShardInterval, partitionShardIntervalList) { /* * Prepare commands for each shard of current partition @@ -1044,7 +1044,7 @@ PartitionWithLongestNameRelationId(Oid parentRelationId) List *partitionList = PartitionList(parentRelationId); Oid partitionRelationId = InvalidOid; - foreach_oid(partitionRelationId, partitionList) + foreach_declared_oid(partitionRelationId, partitionList) { char *partitionName = get_rel_name(partitionRelationId); int partitionNameLength = strnlen(partitionName, NAMEDATALEN); @@ -1130,7 +1130,7 @@ GenerateDetachPartitionCommandRelationIdList(List *relationIds) { List *detachPartitionCommands = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, relationIds) + foreach_declared_oid(relationId, relationIds) { Assert(PartitionTable(relationId)); char *detachCommand = GenerateDetachPartitionCommand(relationId); @@ -1246,7 +1246,7 @@ GenerateAttachPartitionCommandRelationIdList(List *relationIds) { List *attachPartitionCommands = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, relationIds) + foreach_declared_oid(relationId, relationIds) { char *attachCommand = GenerateAlterTableAttachPartitionCommand(relationId); attachPartitionCommands = lappend(attachPartitionCommands, attachCommand); @@ -1318,7 +1318,7 @@ ListShardsUnderParentRelation(Oid relationId) List *partitionList = PartitionList(relationId); Oid partitionRelationId = InvalidOid; - foreach_oid(partitionRelationId, partitionList) + foreach_declared_oid(partitionRelationId, partitionList) { List *childShardList = ListShardsUnderParentRelation(partitionRelationId); shardList = list_concat(shardList, childShardList); diff --git a/src/backend/distributed/utils/reference_table_utils.c b/src/backend/distributed/utils/reference_table_utils.c index b1710c1d6d2..8f0d89fc91f 100644 --- a/src/backend/distributed/utils/reference_table_utils.c +++ b/src/backend/distributed/utils/reference_table_utils.c @@ -228,7 +228,7 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) } WorkerNode *newWorkerNode = NULL; - foreach_ptr(newWorkerNode, newWorkersList) + foreach_declared_ptr(newWorkerNode, newWorkersList) { ereport(NOTICE, (errmsg("replicating reference table '%s' to %s:%d ...", referenceTableName, newWorkerNode->workerName, @@ -360,7 +360,7 @@ AnyRelationsModifiedInTransaction(List *relationIdList) { Oid relationId = InvalidOid; - foreach_oid(relationId, relationIdList) + foreach_declared_oid(relationId, relationIdList) { if (GetRelationDDLAccessMode(relationId) != RELATION_NOT_ACCESSED || GetRelationDMLAccessMode(relationId) != RELATION_NOT_ACCESSED) @@ -389,7 +389,7 @@ WorkersWithoutReferenceTablePlacement(uint64 shardId, LOCKMODE lockMode) workerNodeList = SortList(workerNodeList, CompareWorkerNodes); WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + 
foreach_declared_ptr(workerNode, workerNodeList) { char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; @@ -538,7 +538,7 @@ ReplicatedPlacementsForNodeGroup(int32 groupId) List *replicatedPlacementsForNodeGroup = NIL; Oid replicatedTableId = InvalidOid; - foreach_oid(replicatedTableId, replicatedTableList) + foreach_declared_oid(replicatedTableId, replicatedTableList) { List *placements = GroupShardPlacementsForTableOnGroup(replicatedTableId, groupId); @@ -591,7 +591,7 @@ DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool localOnly) } GroupShardPlacement *placement = NULL; - foreach_ptr(placement, replicatedPlacementListForGroup) + foreach_declared_ptr(placement, replicatedPlacementListForGroup) { LockShardDistributionMetadata(placement->shardId, ExclusiveLock); @@ -627,7 +627,7 @@ DeleteAllReplicatedTablePlacementsFromNodeGroupViaMetadataContext( MemoryContext oldContext = MemoryContextSwitchTo(context->context); GroupShardPlacement *placement = NULL; - foreach_ptr(placement, replicatedPlacementListForGroup) + foreach_declared_ptr(placement, replicatedPlacementListForGroup) { LockShardDistributionMetadata(placement->shardId, ExclusiveLock); @@ -663,7 +663,7 @@ ReplicatedMetadataSyncedDistributedTableList(void) List *replicatedHashDistributedTableList = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, distributedRelationList) + foreach_declared_oid(relationId, distributedRelationList) { if (ShouldSyncTableMetadata(relationId) && !SingleReplicatedTable(relationId)) { @@ -707,7 +707,7 @@ ErrorIfNotAllNodesHaveReferenceTableReplicas(List *workerNodeList) { WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { if (!NodeHasAllReferenceTableReplicas(workerNode)) { @@ -763,7 +763,7 @@ NodeHasAllReferenceTableReplicas(WorkerNode *workerNode) List *shardPlacementList = ActiveShardPlacementList(shardInterval->shardId); ShardPlacement *placement = NULL; - foreach_ptr(placement, shardPlacementList) + foreach_declared_ptr(placement, shardPlacementList) { if (placement->groupId == workerNode->groupId) { diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index 8ac269e4314..3f50b682ec5 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -299,7 +299,7 @@ LockShardListResourcesOnFirstWorker(LOCKMODE lockmode, List *shardIntervalList) appendStringInfo(lockCommand, "SELECT lock_shard_resources(%d, ARRAY[", lockmode); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { int64 shardId = shardInterval->shardId; @@ -388,7 +388,7 @@ LockShardListMetadataOnWorkers(LOCKMODE lockmode, List *shardIntervalList) appendStringInfo(lockCommand, "SELECT lock_shard_metadata(%d, ARRAY[", lockmode); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { int64 shardId = shardInterval->shardId; @@ -529,7 +529,7 @@ LockReferencedReferenceShardDistributionMetadata(uint64 shardId, LOCKMODE lockMo } ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { LockShardDistributionMetadata(shardInterval->shardId, lockMode); } @@ -573,7 +573,7 @@ LockReferencedReferenceShardResources(uint64 shardId, LOCKMODE lockMode) } ShardInterval 
*referencedShardInterval = NULL; - foreach_ptr(referencedShardInterval, referencedShardIntervalList) + foreach_declared_ptr(referencedShardInterval, referencedShardIntervalList) { LockShardResource(referencedShardInterval->shardId, lockMode); } @@ -590,7 +590,7 @@ GetSortedReferenceShardIntervals(List *relationList) List *shardIntervalList = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, relationList) + foreach_declared_oid(relationId, relationList) { if (!IsCitusTableType(relationId, REFERENCE_TABLE)) { @@ -652,7 +652,7 @@ LockShardListMetadata(List *shardIntervalList, LOCKMODE lockMode) shardIntervalList = SortList(shardIntervalList, CompareShardIntervalsById); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { int64 shardId = shardInterval->shardId; @@ -673,7 +673,7 @@ LockShardsInPlacementListMetadata(List *shardPlacementList, LOCKMODE lockMode) SortList(shardPlacementList, CompareShardPlacementsByShardId); GroupShardPlacement *placement = NULL; - foreach_ptr(placement, shardPlacementList) + foreach_declared_ptr(placement, shardPlacementList) { int64 shardId = placement->shardId; @@ -760,7 +760,7 @@ AnyTableReplicated(List *shardIntervalList, List **replicatedShardIntervalList) List *localList = NIL; ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { int64 shardId = shardInterval->shardId; @@ -797,7 +797,7 @@ LockShardListResources(List *shardIntervalList, LOCKMODE lockMode) shardIntervalList = SortList(shardIntervalList, CompareShardIntervalsById); ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { int64 shardId = shardInterval->shardId; @@ -820,7 +820,7 @@ LockRelationShardResources(List *relationShardList, LOCKMODE lockMode) List *shardIntervalList = NIL; RelationShard *relationShard = NULL; - foreach_ptr(relationShard, relationShardList) + foreach_declared_ptr(relationShard, relationShardList) { uint64 shardId = relationShard->shardId; @@ -846,7 +846,7 @@ LockParentShardResourceIfPartition(List *shardIntervalList, LOCKMODE lockMode) List *parentShardIntervalList = NIL; ShardInterval *shardInterval = NULL; - foreach_ptr(shardInterval, shardIntervalList) + foreach_declared_ptr(shardInterval, shardIntervalList) { Oid relationId = shardInterval->relationId; @@ -1092,7 +1092,7 @@ static bool LockRelationRecordListMember(List *lockRelationRecordList, Oid relationId) { LockRelationRecord *record = NULL; - foreach_ptr(record, lockRelationRecordList) + foreach_declared_ptr(record, lockRelationRecordList) { if (record->relationId == relationId) { @@ -1131,7 +1131,7 @@ ConcatLockRelationRecordList(List *lockRelationRecordList, List *relationOidList List *constructedList = NIL; Oid relationId = InvalidOid; - foreach_oid(relationId, relationOidList) + foreach_declared_oid(relationId, relationOidList) { if (!LockRelationRecordListMember(lockRelationRecordList, relationId)) { @@ -1178,7 +1178,7 @@ AcquireDistributedLockOnRelations_Internal(List *lockRelationRecordList, int lockedRelations = 0; LockRelationRecord *lockRelationRecord; - foreach_ptr(lockRelationRecord, lockRelationRecordList) + foreach_declared_ptr(lockRelationRecord, lockRelationRecordList) { Oid relationId = lockRelationRecord->relationId; bool lockDescendants = lockRelationRecord->inh; @@ -1251,7 +1251,7 @@ 
AcquireDistributedLockOnRelations_Internal(List *lockRelationRecordList, WorkerNode *workerNode = NULL; const char *currentUser = CurrentUserName(); - foreach_ptr(workerNode, workerNodeList) + foreach_declared_ptr(workerNode, workerNodeList) { /* if local node is one of the targets, acquire the lock locally */ if (workerNode->groupId == localGroupId) @@ -1294,7 +1294,7 @@ AcquireDistributedLockOnRelations(List *relationList, LOCKMODE lockMode, uint32 bool nowait = (configs & DIST_LOCK_NOWAIT) > 0; RangeVar *rangeVar = NULL; - foreach_ptr(rangeVar, relationList) + foreach_declared_ptr(rangeVar, relationList) { Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false); diff --git a/src/backend/distributed/utils/shardinterval_utils.c b/src/backend/distributed/utils/shardinterval_utils.c index 124bfbdf1c6..05df7d8161d 100644 --- a/src/backend/distributed/utils/shardinterval_utils.c +++ b/src/backend/distributed/utils/shardinterval_utils.c @@ -471,7 +471,7 @@ SingleReplicatedTable(Oid relationId) } uint64 *shardIdPointer = NULL; - foreach_ptr(shardIdPointer, shardList) + foreach_declared_ptr(shardIdPointer, shardList) { uint64 shardId = *shardIdPointer; shardPlacementList = ShardPlacementList(shardId); diff --git a/src/backend/distributed/utils/statistics_collection.c b/src/backend/distributed/utils/statistics_collection.c index 1cadea968c3..649c9dc826d 100644 --- a/src/backend/distributed/utils/statistics_collection.c +++ b/src/backend/distributed/utils/statistics_collection.c @@ -184,7 +184,7 @@ DistributedTablesSize(List *distTableOids) uint64 totalSize = 0; Oid relationId = InvalidOid; - foreach_oid(relationId, distTableOids) + foreach_declared_oid(relationId, distTableOids) { /* * Relations can get dropped after getting the Oid list and before we diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index 2fab84ac6b6..451649969df 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -85,7 +85,7 @@ WrapCreateOrReplaceList(List *sqls) appendStringInfoString(&textArrayLitteral, "ARRAY["); const char *sql = NULL; bool first = true; - foreach_ptr(sql, sqls) + foreach_declared_ptr(sql, sqls) { if (!first) { @@ -251,7 +251,7 @@ WorkerCreateOrReplaceObject(List *sqlStatements) /* apply all statement locally */ char *sqlStatement = NULL; - foreach_ptr(sqlStatement, sqlStatements) + foreach_declared_ptr(sqlStatement, sqlStatements) { parseTree = ParseTreeNode(sqlStatement); ProcessUtilityParseTree(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL, diff --git a/src/backend/distributed/worker/worker_data_fetch_protocol.c b/src/backend/distributed/worker/worker_data_fetch_protocol.c index f51d9c80c31..0370001eec2 100644 --- a/src/backend/distributed/worker/worker_data_fetch_protocol.c +++ b/src/backend/distributed/worker/worker_data_fetch_protocol.c @@ -170,7 +170,8 @@ worker_adjust_identity_column_seq_ranges(PG_FUNCTION_ARGS) if (attributeForm->attidentity) { - Oid sequenceOid = getIdentitySequence(tableRelationId, + Oid sequenceOid = getIdentitySequence(identitySequenceRelation_compat( + tableRelation), attributeForm->attnum, missingSequenceOk); @@ -377,7 +378,7 @@ check_log_statement(List *statementList) /* else we have to inspect the statement(s) to see whether to log */ Node *statement = NULL; - foreach_ptr(statement, statementList) + foreach_declared_ptr(statement, statementList) { if (GetCommandLogLevel(statement) <= log_statement) { 
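The bulk of the hunks above (and below) are a mechanical rename of Citus's list-iteration macros from foreach_ptr / foreach_int / foreach_oid to foreach_declared_ptr / foreach_declared_int / foreach_declared_oid; the macro definitions themselves are updated in the src/include/distributed/listutils.h hunks further down. Call sites change only in the macro name: the loop variable is still declared up front (which is what the new name emphasizes) and no explicit ListCell is needed. A minimal sketch of a call site after the rename; PrintWorkerNames is a hypothetical helper, not part of this patch:

```c
#include "postgres.h"

#include "distributed/listutils.h"
#include "distributed/worker_manager.h"

/* hypothetical helper illustrating a renamed call site */
static void
PrintWorkerNames(List *workerNodeList)
{
	WorkerNode *workerNode = NULL;

	/* before this patch: foreach_ptr(workerNode, workerNodeList) */
	foreach_declared_ptr(workerNode, workerNodeList)
	{
		elog(DEBUG1, "worker %s:%d",
			 workerNode->workerName, workerNode->workerPort);
	}
}
```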
@@ -480,7 +481,7 @@ void SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg) { DefElem *defElem = NULL; - foreach_ptr(defElem, statement->options) + foreach_declared_ptr(defElem, statement->options) { if (strcmp(defElem->defname, name) == 0) { diff --git a/src/backend/distributed/worker/worker_drop_protocol.c b/src/backend/distributed/worker/worker_drop_protocol.c index 280de4493bd..c4c2fe5b574 100644 --- a/src/backend/distributed/worker/worker_drop_protocol.c +++ b/src/backend/distributed/worker/worker_drop_protocol.c @@ -93,7 +93,7 @@ worker_drop_distributed_table(PG_FUNCTION_ARGS) */ List *partitionList = PartitionList(relationId); Oid partitionOid = InvalidOid; - foreach_oid(partitionOid, partitionList) + foreach_declared_oid(partitionOid, partitionList) { WorkerDropDistributedTable(partitionOid); } @@ -128,7 +128,7 @@ WorkerDropDistributedTable(Oid relationId) List *ownedSequences = getOwnedSequences(relationId); Oid ownedSequenceOid = InvalidOid; - foreach_oid(ownedSequenceOid, ownedSequences) + foreach_declared_oid(ownedSequenceOid, ownedSequences) { ObjectAddress ownedSequenceAddress = { 0 }; ObjectAddressSet(ownedSequenceAddress, RelationRelationId, ownedSequenceOid); @@ -144,13 +144,13 @@ WorkerDropDistributedTable(Oid relationId) */ List *shardList = LoadShardList(relationId); uint64 *shardIdPointer = NULL; - foreach_ptr(shardIdPointer, shardList) + foreach_declared_ptr(shardIdPointer, shardList) { uint64 shardId = *shardIdPointer; List *shardPlacementList = ShardPlacementList(shardId); ShardPlacement *placement = NULL; - foreach_ptr(placement, shardPlacementList) + foreach_declared_ptr(placement, shardPlacementList) { /* delete the row from pg_dist_placement */ DeleteShardPlacementRow(placement->placementId); @@ -236,7 +236,7 @@ worker_drop_shell_table(PG_FUNCTION_ARGS) List *ownedSequences = getOwnedSequences(relationId); Oid ownedSequenceOid = InvalidOid; - foreach_oid(ownedSequenceOid, ownedSequences) + foreach_declared_oid(ownedSequenceOid, ownedSequences) { ObjectAddress ownedSequenceAddress = { 0 }; ObjectAddressSet(ownedSequenceAddress, RelationRelationId, ownedSequenceOid); @@ -284,7 +284,7 @@ worker_drop_sequence_dependency(PG_FUNCTION_ARGS) List *ownedSequences = getOwnedSequences(relationId); Oid ownedSequenceOid = InvalidOid; - foreach_oid(ownedSequenceOid, ownedSequences) + foreach_declared_oid(ownedSequenceOid, ownedSequences) { /* the caller doesn't want to drop the sequence, so break the dependency */ deleteDependencyRecordsForSpecific(RelationRelationId, ownedSequenceOid, diff --git a/src/backend/distributed/worker/worker_shard_visibility.c b/src/backend/distributed/worker/worker_shard_visibility.c index 3725800c30b..f783d514d2d 100644 --- a/src/backend/distributed/worker/worker_shard_visibility.c +++ b/src/backend/distributed/worker/worker_shard_visibility.c @@ -382,7 +382,7 @@ ShouldHideShardsInternal(void) } char *appNamePrefix = NULL; - foreach_ptr(appNamePrefix, prefixList) + foreach_declared_ptr(appNamePrefix, prefixList) { /* never hide shards when one of the prefixes is * */ if (strcmp(appNamePrefix, "*") == 0) @@ -446,7 +446,7 @@ FilterShardsFromPgclass(Node *node, void *context) int varno = 0; RangeTblEntry *rangeTableEntry = NULL; - foreach_ptr(rangeTableEntry, query->rtable) + foreach_declared_ptr(rangeTableEntry, query->rtable) { varno++; diff --git a/src/include/distributed/listutils.h b/src/include/distributed/listutils.h index 2a52cbc7527..db9ea7ce712 100644 --- a/src/include/distributed/listutils.h +++ 
b/src/include/distributed/listutils.h @@ -36,7 +36,7 @@ typedef struct ListCellAndListWrapper } ListCellAndListWrapper; /* - * foreach_ptr - + * foreach_declared_ptr - * a convenience macro which loops through a pointer list without needing a * ListCell, just a declared pointer variable to store the pointer of the * cell in. @@ -50,7 +50,7 @@ typedef struct ListCellAndListWrapper * - || true is used to always enter the loop when cell is not null even if * var is NULL. */ -#define foreach_ptr(var, l) \ +#define foreach_declared_ptr(var, l) \ for (ListCell *(var ## CellDoNotUse) = list_head(l); \ (var ## CellDoNotUse) != NULL && \ (((var) = lfirst(var ## CellDoNotUse)) || true); \ @@ -58,12 +58,12 @@ typedef struct ListCellAndListWrapper /* - * foreach_int - + * foreach_declared_int - * a convenience macro which loops through an int list without needing a * ListCell, just a declared int variable to store the int of the cell in. - * For explanation of how it works see foreach_ptr. + * For explanation of how it works see foreach_declared_ptr. */ -#define foreach_int(var, l) \ +#define foreach_declared_int(var, l) \ for (ListCell *(var ## CellDoNotUse) = list_head(l); \ (var ## CellDoNotUse) != NULL && \ (((var) = lfirst_int(var ## CellDoNotUse)) || true); \ @@ -71,12 +71,12 @@ typedef struct ListCellAndListWrapper /* - * foreach_oid - + * foreach_declared_oid - * a convenience macro which loops through an oid list without needing a * ListCell, just a declared Oid variable to store the oid of the cell in. - * For explanation of how it works see foreach_ptr. + * For explanation of how it works see foreach_declared_ptr. */ -#define foreach_oid(var, l) \ +#define foreach_declared_oid(var, l) \ for (ListCell *(var ## CellDoNotUse) = list_head(l); \ (var ## CellDoNotUse) != NULL && \ (((var) = lfirst_oid(var ## CellDoNotUse)) || true); \ diff --git a/src/include/distributed/resource_lock.h b/src/include/distributed/resource_lock.h index 576d2bf1516..3b1e9a459e7 100644 --- a/src/include/distributed/resource_lock.h +++ b/src/include/distributed/resource_lock.h @@ -169,7 +169,7 @@ IsNodeWideObjectClass(ObjectClass objectClass) * If new object classes are added and none of them are node-wide, then update * this assertion check based on latest supported major Postgres version. 
*/ - StaticAssertStmt(PG_MAJORVERSION_NUM <= 16, + StaticAssertStmt(PG_MAJORVERSION_NUM <= 17, "better to check if any of newly added ObjectClass'es are node-wide"); switch (objectClass) diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index 665cd30c264..d8616cc301b 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -13,6 +13,142 @@ #include "pg_version_constants.h" +#if PG_VERSION_NUM >= PG_VERSION_17 + +#include "catalog/pg_am.h" +#include "catalog/pg_auth_members.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_class.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_constraint.h" +#include "catalog/pg_database.h" +#include "catalog/pg_extension.h" +#include "catalog/pg_foreign_server.h" +#include "catalog/pg_namespace.h" +#include "catalog/pg_parameter_acl.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_publication.h" +#include "catalog/pg_tablespace.h" +#include "catalog/pg_transform.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_template.h" +#include "catalog/pg_type.h" + +typedef int ObjectClass; +#define getObjectClass(a) a->classId +#define LAST_OCLASS TransformRelationId +#define OCLASS_ROLE AuthIdRelationId +#define OCLASS_DATABASE DatabaseRelationId +#define OCLASS_TBLSPACE TableSpaceRelationId +#define OCLASS_PARAMETER_ACL ParameterAclRelationId +#define OCLASS_ROLE_MEMBERSHIP AuthMemRelationId +#define OCLASS_CLASS RelationRelationId +#define OCLASS_COLLATION CollationRelationId +#define OCLASS_CONSTRAINT ConstraintRelationId +#define OCLASS_PROC ProcedureRelationId +#define OCLASS_PUBLICATION PublicationRelationId +#define OCLASS_SCHEMA NamespaceRelationId +#define OCLASS_TSCONFIG TSConfigRelationId +#define OCLASS_TSDICT TSDictionaryRelationId +#define OCLASS_TYPE TypeRelationId +#define OCLASS_EXTENSION ExtensionRelationId +#define OCLASS_FOREIGN_SERVER ForeignServerRelationId +#define OCLASS_AM AccessMethodRelationId +#define OCLASS_TSTEMPLATE TSTemplateRelationId + +#define Anum_pg_collation_colliculocale Anum_pg_collation_colllocale +#define Anum_pg_database_daticulocale Anum_pg_database_datlocale + +#include "commands/tablecmds.h" + +static inline void +RangeVarCallbackOwnsTable(const RangeVar *relation, + Oid relId, Oid oldRelId, void *arg) +{ + return RangeVarCallbackMaintainsTable(relation, relId, oldRelId, arg); +} + + +#include "catalog/pg_attribute.h" +#include "utils/syscache.h" + +static inline int32 +getAttstattarget_compat(HeapTuple attTuple) +{ + bool isnull; + Datum dat = SysCacheGetAttr(ATTNUM, attTuple, + Anum_pg_attribute_attstattarget, &isnull); + return (isnull ? -1 : DatumGetInt16(dat)); +} + + +#include "catalog/pg_statistic_ext.h" + +static inline int16 +getStxstattarget_compat(HeapTuple tup) +{ + bool isnull; + Datum dat = SysCacheGetAttr(STATEXTOID, tup, + Anum_pg_statistic_ext_stxstattarget, &isnull); + return (isnull ? 
-1 : DatumGetInt16(dat)); +} + + +#define getAlterStatsStxstattarget_compat(a) ((Node *) makeInteger(a)) +#define getIntStxstattarget_compat(a) (intVal(a)) + +#define WaitEventSetTracker_compat CurrentResourceOwner + +#define identitySequenceRelation_compat(a) (a) + +#define matched_compat(a) (a->matchKind == MERGE_WHEN_MATCHED) + +#define create_foreignscan_path_compat(a, b, c, d, e, f, g, h, i, j, \ + k) create_foreignscan_path(a, b, c, d, e, f, g, h, \ + i, j, k) + +#define getProcNo_compat(a) (a->vxid.procNumber) +#define getLxid_compat(a) (a->vxid.lxid) + +#else + +#include "access/htup_details.h" +static inline int32 +getAttstattarget_compat(HeapTuple attTuple) +{ + return ((Form_pg_attribute) GETSTRUCT(attTuple))->attstattarget; +} + + +#include "catalog/pg_statistic_ext.h" +static inline int32 +getStxstattarget_compat(HeapTuple tup) +{ + return ((Form_pg_statistic_ext) GETSTRUCT(tup))->stxstattarget; +} + + +#define getAlterStatsStxstattarget_compat(a) (a) +#define getIntStxstattarget_compat(a) (a) + +#define WaitEventSetTracker_compat CurrentMemoryContext + +#define identitySequenceRelation_compat(a) (RelationGetRelid(a)) + +#define matched_compat(a) (a->matched) + +#define create_foreignscan_path_compat(a, b, c, d, e, f, g, h, i, j, \ + k) create_foreignscan_path(a, b, c, d, e, f, g, h, \ + i, k) + +#define getProcNo_compat(a) (a->pgprocno) +#define getLxid_compat(a) (a->lxid) + +#define COLLPROVIDER_BUILTIN 'b' + +#endif + #if PG_VERSION_NUM >= PG_VERSION_16 #include "utils/guc_tables.h" diff --git a/src/include/pg_version_constants.h b/src/include/pg_version_constants.h index 9761dff8340..ba2a9a03e62 100644 --- a/src/include/pg_version_constants.h +++ b/src/include/pg_version_constants.h @@ -15,5 +15,6 @@ #define PG_VERSION_15 150000 #define PG_VERSION_16 160000 #define PG_VERSION_17 170000 +#define PG_VERSION_18 180000 #endif /* PG_VERSION_CONSTANTS */ diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py index 6c09e0b3852..19b747c09e2 100644 --- a/src/test/regress/citus_tests/common.py +++ b/src/test/regress/citus_tests/common.py @@ -93,6 +93,7 @@ def get_pg_major_version(): 14: "10.2.0", 15: "11.1.5", 16: "12.1.1", + 17: "12.1.1", } OLDEST_SUPPORTED_CITUS_VERSION = OLDEST_SUPPORTED_CITUS_VERSION_MATRIX[PG_MAJOR_VERSION] diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index b9d3f7faaab..f5882e5e75f 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -60,7 +60,14 @@ SET search_path TO public; SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset -\if :server_version_ge_16 +SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 +\gset +\if :server_version_ge_17 +-- PG17 renamed colliculocale to colllocale +-- Relevant PG commit: +-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d +SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset +\elif :server_version_ge_16 -- In PG16, read-only server settings lc_collate and lc_ctype are removed -- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset diff 
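The new PG_VERSION_17 block in pg_version_compat.h above keeps call sites version-agnostic by hiding the PG17 changes behind small shims: identitySequenceRelation_compat passes a Relation (PG17) or its Oid (earlier versions) to getIdentitySequence, as used in worker_data_fetch_protocol.c above, while getAttstattarget_compat and getStxstattarget_compat read the now-nullable statistics targets through the syscache on PG17 and fall back to the struct fields on earlier versions. A sketch of a caller using the attstattarget shim; ReadAttributeStatsTarget is a hypothetical function, not part of this patch:

```c
#include "postgres.h"

#include "access/attnum.h"
#include "access/htup.h"
#include "utils/syscache.h"

#include "pg_version_compat.h"

/* hypothetical caller; only getAttstattarget_compat() comes from this patch */
static int32
ReadAttributeStatsTarget(Oid relationId, AttrNumber attributeNumber)
{
	HeapTuple attTuple = SearchSysCache2(ATTNUM,
										 ObjectIdGetDatum(relationId),
										 Int16GetDatum(attributeNumber));
	if (!HeapTupleIsValid(attTuple))
	{
		return -1;
	}

	/*
	 * On PG17 this reads the nullable attstattarget column via the syscache;
	 * on earlier versions it reads the attstattarget struct field directly.
	 */
	int32 statsTarget = getAttstattarget_compat(attTuple);

	ReleaseSysCache(attTuple);
	return statsTarget;
}
```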
--git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index 2de95266b33..e6b5ac9a9a9 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -350,7 +350,14 @@ SET search_path TO public; SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset -\if :server_version_ge_16 +SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 +\gset +\if :server_version_ge_17 +-- PG17 renamed colliculocale to colllocale +-- Relevant PG commit: +-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d +SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset +\elif :server_version_ge_16 -- In PG16, read-only server settings lc_collate and lc_ctype are removed -- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index 0f31f2354eb..f9b65d3f11c 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -565,13 +565,15 @@ CREATE OR REPLACE FUNCTION check_database_on_all_nodes(p_database_name text) RETURNS TABLE (node_type text, result text) AS $func$ DECLARE - pg_ge_15_options text := ''; + pg_ge_15_17_options text := ''; pg_ge_16_options text := ''; BEGIN - IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'datlocprovider') THEN - pg_ge_15_options := ', daticulocale, datcollversion, datlocprovider'; + IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'daticulocale') THEN + pg_ge_15_17_options := ', daticulocale, datcollversion, datlocprovider'; + ELSIF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'datlocale') THEN + pg_ge_15_17_options := ', datlocale as daticulocale, datcollversion, datlocprovider'; ELSE - pg_ge_15_options := $$, null as daticulocale, null as datcollversion, 'c' as datlocprovider$$; + pg_ge_15_17_options := $$, null as daticulocale, null as datcollversion, 'c' as datlocprovider$$; END IF; IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'daticurules') THEN @@ -601,7 +603,7 @@ BEGIN pg_encoding_to_char(pd.encoding) as encoding, datistemplate, datallowconn, datconnlimit, datacl, pt.spcname AS tablespace, datcollate, datctype - %2$s -- >= pg15 options + %2$s -- >= pg15 & pg17 options %3$s -- >= pg16 options FROM pg_database pd JOIN pg_authid pa ON pd.datdba = pa.oid @@ -620,7 +622,7 @@ BEGIN ) AS stale_pg_dist_object_record_for_a_db_exists ) q $$, - p_database_name, pg_ge_15_options, pg_ge_16_options + p_database_name, pg_ge_15_17_options, pg_ge_16_options ) ) q2 JOIN pg_dist_node USING (nodeid); diff --git a/src/test/regress/expected/pg15.out b/src/test/regress/expected/pg15.out index eff8b0ce662..28e746a9169 100644 --- a/src/test/regress/expected/pg15.out +++ b/src/test/regress/expected/pg15.out @@ -51,9 +51,32 @@ SELECT result FROM run_command_on_all_nodes(' (3 rows) -SELECT result FROM run_command_on_all_nodes(' - SELECT 
colliculocale FROM pg_collation WHERE collname = ''german_phonebook_test''; -'); +-- PG17 renamed colliculocale to colllocale +-- Relevant PG commit: +-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 +\gset +\if :server_version_ge_17 +SELECT '$$' || + 'SELECT colllocale FROM pg_collation WHERE collname = ''german_phonebook_test'';' + || '$$' + AS worker_query_1 \gset +SELECT '$$' || + 'SELECT colllocale FROM pg_collation WHERE collname = ''default_provider'';' + || '$$' + AS worker_query_2 \gset +\else +SELECT '$$' || + 'SELECT colliculocale FROM pg_collation WHERE collname = ''german_phonebook_test'';' + || '$$' + AS worker_query_1 \gset +SELECT '$$' || + 'SELECT colliculocale FROM pg_collation WHERE collname = ''default_provider'';' + || '$$' + AS worker_query_2 \gset +\endif +SELECT result FROM run_command_on_all_nodes(:worker_query_1); result --------------------------------------------------------------------- de-u-co-phonebk @@ -83,9 +106,7 @@ SELECT result FROM run_command_on_all_nodes(' POSIX (3 rows) -SELECT result FROM run_command_on_all_nodes(' - SELECT colliculocale FROM pg_collation WHERE collname = ''default_provider''; -'); +SELECT result FROM run_command_on_all_nodes(:worker_query_2); result --------------------------------------------------------------------- diff --git a/src/test/regress/expected/pg16.out b/src/test/regress/expected/pg16.out index a035fcfc4a2..f851a6dfba2 100644 --- a/src/test/regress/expected/pg16.out +++ b/src/test/regress/expected/pg16.out @@ -330,14 +330,14 @@ SELECT create_distributed_table('test_collation_rules', 'a'); (1 row) INSERT INTO test_collation_rules VALUES ('Abernathy'), ('apple'), ('bird'), ('Boston'), ('Graham'), ('green'); -SELECT collname, collprovider, colliculocale, collicurules +SELECT collname, collprovider, collicurules FROM pg_collation WHERE collname like '%_rule%' ORDER BY 1; - collname | collprovider | colliculocale | collicurules + collname | collprovider | collicurules --------------------------------------------------------------------- - default_rule | i | und | - special_rule | i | und | &a < g + default_rule | i | + special_rule | i | &a < g (2 rows) SELECT * FROM test_collation_rules ORDER BY a COLLATE default_rule; @@ -364,14 +364,14 @@ SELECT * FROM test_collation_rules ORDER BY a COLLATE special_rule; \c - - - :worker_1_port SET search_path TO pg16; -SELECT collname, collprovider, colliculocale, collicurules +SELECT collname, collprovider, collicurules FROM pg_collation WHERE collname like '%_rule%' ORDER BY 1; - collname | collprovider | colliculocale | collicurules + collname | collprovider | collicurules --------------------------------------------------------------------- - default_rule | i | und | - special_rule | i | und | &a < g + default_rule | i | + special_rule | i | &a < g (2 rows) SELECT * FROM test_collation_rules ORDER BY a COLLATE default_rule; diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql index 4fb6eadbbc5..1a267b30139 100644 --- a/src/test/regress/sql/multi_mx_create_table.sql +++ b/src/test/regress/sql/multi_mx_create_table.sql @@ -61,8 +61,15 @@ SET search_path TO public; SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset +SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 +\gset -\if :server_version_ge_16 
+\if :server_version_ge_17 +-- PG17 renamed colliculocale to colllocale +-- Relevant PG commit: +-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d +SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset +\elif :server_version_ge_16 -- In PG16, read-only server settings lc_collate and lc_ctype are removed -- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql index 146cf78d428..13be94857ac 100644 --- a/src/test/regress/sql/multi_schema_support.sql +++ b/src/test/regress/sql/multi_schema_support.sql @@ -297,8 +297,15 @@ SET search_path TO public; SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset +SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 +\gset -\if :server_version_ge_16 +\if :server_version_ge_17 +-- PG17 renamed colliculocale to colllocale +-- Relevant PG commit: +-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d +SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN datlocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset +\elif :server_version_ge_16 -- In PG16, read-only server settings lc_collate and lc_ctype are removed -- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982 SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql index 7d218361ce7..bd4a40b1ea4 100644 --- a/src/test/regress/sql/multi_test_helpers.sql +++ b/src/test/regress/sql/multi_test_helpers.sql @@ -591,13 +591,15 @@ CREATE OR REPLACE FUNCTION check_database_on_all_nodes(p_database_name text) RETURNS TABLE (node_type text, result text) AS $func$ DECLARE - pg_ge_15_options text := ''; + pg_ge_15_17_options text := ''; pg_ge_16_options text := ''; BEGIN - IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'datlocprovider') THEN - pg_ge_15_options := ', daticulocale, datcollversion, datlocprovider'; + IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'daticulocale') THEN + pg_ge_15_17_options := ', daticulocale, datcollversion, datlocprovider'; + ELSIF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'datlocale') THEN + pg_ge_15_17_options := ', datlocale as daticulocale, datcollversion, datlocprovider'; ELSE - pg_ge_15_options := $$, null as daticulocale, null as datcollversion, 'c' as datlocprovider$$; + pg_ge_15_17_options := $$, null as daticulocale, null as datcollversion, 'c' as datlocprovider$$; END IF; IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'daticurules') THEN @@ -627,7 +629,7 @@ BEGIN pg_encoding_to_char(pd.encoding) as encoding, datistemplate, datallowconn, datconnlimit, datacl, pt.spcname AS tablespace, datcollate, datctype - %2$s -- >= pg15 options + %2$s -- >= pg15 & pg17 options %3$s -- 
>= pg16 options FROM pg_database pd JOIN pg_authid pa ON pd.datdba = pa.oid @@ -646,7 +648,7 @@ BEGIN ) AS stale_pg_dist_object_record_for_a_db_exists ) q $$, - p_database_name, pg_ge_15_options, pg_ge_16_options + p_database_name, pg_ge_15_17_options, pg_ge_16_options ) ) q2 JOIN pg_dist_node USING (nodeid); diff --git a/src/test/regress/sql/pg15.sql b/src/test/regress/sql/pg15.sql index cd9dab58c59..3773151fdf0 100644 --- a/src/test/regress/sql/pg15.sql +++ b/src/test/regress/sql/pg15.sql @@ -41,9 +41,36 @@ SELECT result FROM run_command_on_all_nodes(' SELECT result FROM run_command_on_all_nodes(' SELECT collctype FROM pg_collation WHERE collname = ''german_phonebook_test''; '); -SELECT result FROM run_command_on_all_nodes(' - SELECT colliculocale FROM pg_collation WHERE collname = ''german_phonebook_test''; -'); + +-- PG17 renamed colliculocale to colllocale +-- Relevant PG commit: +-- https://github.com/postgres/postgres/commit/f696c0cd5f299f1b51e214efc55a22a782cc175d + +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17 +\gset + +\if :server_version_ge_17 +SELECT '$$' || + 'SELECT colllocale FROM pg_collation WHERE collname = ''german_phonebook_test'';' + || '$$' + AS worker_query_1 \gset +SELECT '$$' || + 'SELECT colllocale FROM pg_collation WHERE collname = ''default_provider'';' + || '$$' + AS worker_query_2 \gset +\else +SELECT '$$' || + 'SELECT colliculocale FROM pg_collation WHERE collname = ''german_phonebook_test'';' + || '$$' + AS worker_query_1 \gset +SELECT '$$' || + 'SELECT colliculocale FROM pg_collation WHERE collname = ''default_provider'';' + || '$$' + AS worker_query_2 \gset +\endif + +SELECT result FROM run_command_on_all_nodes(:worker_query_1); -- with non-icu provider, colliculocale will be null, collcollate and collctype will be set CREATE COLLATION default_provider (provider = libc, lc_collate = "POSIX", lc_ctype = "POSIX"); @@ -54,9 +81,7 @@ SELECT result FROM run_command_on_all_nodes(' SELECT result FROM run_command_on_all_nodes(' SELECT collctype FROM pg_collation WHERE collname = ''default_provider''; '); -SELECT result FROM run_command_on_all_nodes(' - SELECT colliculocale FROM pg_collation WHERE collname = ''default_provider''; -'); +SELECT result FROM run_command_on_all_nodes(:worker_query_2); -- -- In PG15, Renaming triggers on partitioned tables had two problems diff --git a/src/test/regress/sql/pg16.sql b/src/test/regress/sql/pg16.sql index 99024edcba8..6b5cc4cf102 100644 --- a/src/test/regress/sql/pg16.sql +++ b/src/test/regress/sql/pg16.sql @@ -159,7 +159,7 @@ CREATE TABLE test_collation_rules (a text); SELECT create_distributed_table('test_collation_rules', 'a'); INSERT INTO test_collation_rules VALUES ('Abernathy'), ('apple'), ('bird'), ('Boston'), ('Graham'), ('green'); -SELECT collname, collprovider, colliculocale, collicurules +SELECT collname, collprovider, collicurules FROM pg_collation WHERE collname like '%_rule%' ORDER BY 1; @@ -170,7 +170,7 @@ SELECT * FROM test_collation_rules ORDER BY a COLLATE special_rule; \c - - - :worker_1_port SET search_path TO pg16; -SELECT collname, collprovider, colliculocale, collicurules +SELECT collname, collprovider, collicurules FROM pg_collation WHERE collname like '%_rule%' ORDER BY 1;
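Relating back to the pg_version_compat.h block and the resource_lock.h assertion bump above: on PG17 the compat header no longer relies on the ObjectClass enum and getObjectClass(); it typedefs ObjectClass to int, maps each OCLASS_* name to the corresponding catalog relation OID, and reads classId straight from the ObjectAddress, which is also why the StaticAssertStmt guarding IsNodeWideObjectClass moves from PG 16 to PG 17. A minimal sketch, assuming a hypothetical helper IsRoleOrDatabaseObject (not part of this patch), of how a switch over getObjectClass() keeps compiling on both sides of that boundary:

```c
#include "postgres.h"

#include "catalog/dependency.h"
#include "catalog/objectaddress.h"

#include "pg_version_compat.h"

/*
 * Hypothetical helper. On PG16 and earlier, OCLASS_ROLE / OCLASS_DATABASE are
 * ObjectClass enum values; with the PG17 compat mapping they resolve to
 * AuthIdRelationId / DatabaseRelationId and getObjectClass() simply reads
 * address->classId, so the same switch compiles on both.
 */
static bool
IsRoleOrDatabaseObject(const ObjectAddress *address)
{
	ObjectClass objectClass = getObjectClass(address);

	switch (objectClass)
	{
		case OCLASS_ROLE:
		case OCLASS_DATABASE:
		{
			return true;
		}

		default:
		{
			return false;
		}
	}
}
```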