diff --git a/.github/PULL_REQUEST_TEMPLATE b/.github/PULL_REQUEST_TEMPLATE new file mode 100644 index 0000000000..2874dbc9b8 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE @@ -0,0 +1,37 @@ +### Describe the changes in this pull request + + +### Describe if there are any user-facing changes + + +### How was this pull request tested? + + +### Does your PR have changes that can cause upgrade issues? +| Component | Breaking changes? | +| :----------------------------------------------: | :-----------: | +| MetaDB | Yes/No | +| Name registry json | Yes/No | +| Data File Descriptor Json | Yes/No | +| Export Snapshot Status Json | Yes/No | +| Import Data State | Yes/No | +| Export Status Json | Yes/No | +| Data .sql files of tables | Yes/No | +| Export and import data queue | Yes/No | +| Schema Dump | Yes/No | +| AssessmentDB | Yes/No | +| Sizing DB | Yes/No | +| Migration Assessment Report Json | Yes/No | +| Callhome Json | Yes/No | +| YugabyteD Tables | Yes/No | +| TargetDB Metadata Tables | Yes/No | diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 09d8cddc52..88c20428d5 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -8,7 +8,7 @@ on: jobs: - build: + build-and-test: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 @@ -26,7 +26,7 @@ jobs: - name: Test run: | cd yb-voyager - go test -v ./... + go test -v ./... 
-tags 'unit' - name: Vet run: | diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml new file mode 100644 index 0000000000..d94a594cb1 --- /dev/null +++ b/.github/workflows/integration-tests.yml @@ -0,0 +1,53 @@ +name: Go + +on: + push: + branches: ['main', '*.*-dev', '*.*.*-dev'] + pull_request: + branches: [main] + +env: + ORACLE_INSTANT_CLIENT_VERSION: "21.5.0.0.0-1" + +jobs: + integration-tests: + strategy: + fail-fast: false + + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: "1.23.1" + + - name: Build + run: | + cd yb-voyager + go build -v ./... + + # required by godror driver used in the tests + - name: Install Oracle Instant Clients + run: | + # Download and install the YB APT repository package + wget https://s3.us-west-2.amazonaws.com/downloads.yugabyte.com/repos/reporpms/yb-apt-repo_1.0.0_all.deb + sudo apt-get install -y ./yb-apt-repo_1.0.0_all.deb + sudo apt-get update -y + + # Install Oracle Instant Client packages using the defined version + sudo apt-get install -y oracle-instantclient-tools=${{ env.ORACLE_INSTANT_CLIENT_VERSION }} + sudo apt-get install -y oracle-instantclient-basic=${{ env.ORACLE_INSTANT_CLIENT_VERSION }} + sudo apt-get install -y oracle-instantclient-devel=${{ env.ORACLE_INSTANT_CLIENT_VERSION }} + sudo apt-get install -y oracle-instantclient-jdbc=${{ env.ORACLE_INSTANT_CLIENT_VERSION }} + sudo apt-get install -y oracle-instantclient-sqlplus=${{ env.ORACLE_INSTANT_CLIENT_VERSION }} + + # Clean up the YB APT repository package + sudo apt-get remove -y yb-apt-repo + rm -f yb-apt-repo_1.0.0_all.deb + + - name: Run Integration Tests + run: | + cd yb-voyager + go test -v ./... 
-tags 'integration' diff --git a/.github/workflows/issue-tests.yml b/.github/workflows/issue-tests.yml new file mode 100644 index 0000000000..550eef3c8a --- /dev/null +++ b/.github/workflows/issue-tests.yml @@ -0,0 +1,50 @@ +name: Go + +on: + push: + branches: ['main', '*.*-dev', '*.*.*-dev'] + pull_request: + branches: [main] + +jobs: + + test-issues-against-all-yb-versions: + strategy: + fail-fast: false + matrix: + version: [2.23.1.0-b220, 2024.1.3.1-b8, 2024.2.0.0-b145, 2.20.8.0-b53, 2.18.9.0-b17] + env: + YB_VERSION: ${{ matrix.version }} + YB_CONN_STR: "postgres://yugabyte:yugabyte@127.0.0.1:5433/yugabyte" + + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: "1.23.1" + + - name: Setup YugabyteDB + run: | + # using s3 release instead of docker image to allow testing against un-released versions + wget https://s3.us-west-2.amazonaws.com/releases.yugabyte.com/${YB_VERSION}/yugabyte-${YB_VERSION}-centos-x86_64.tar.gz + mkdir -p yugabyte-${YB_VERSION} + tar -xvzf yugabyte-${YB_VERSION}-centos-x86_64.tar.gz -C yugabyte-${YB_VERSION} --strip-components=1 + yugabyte-${YB_VERSION}/bin/yugabyted start --advertise_address 127.0.0.1 + sleep 20 + + - name: Test YugabyteDB connection + run: | + psql "${YB_CONN_STR}" -c "SELECT version();" + + - name: Build + run: | + cd yb-voyager + go build -v ./... + + - name: Test Issues Against YB Version + run: | + cd yb-voyager + go test -v ./... 
-tags 'issues_integration' diff --git a/.github/workflows/misc-migtests.yml b/.github/workflows/misc-migtests.yml index 4a36f89aef..3362028481 100644 --- a/.github/workflows/misc-migtests.yml +++ b/.github/workflows/misc-migtests.yml @@ -12,9 +12,9 @@ jobs: services: postgres: - image: postgres:13 + image: postgres:17 env: - POSTGRES_PASSWORD: secret + POSTGRES_PASSWORD: postgres # Set health checks to wait until postgres has started options: >- --health-cmd pg_isready @@ -55,27 +55,32 @@ jobs: sudo apt install -y libpq-dev sudo apt install python3-psycopg2 + #TODO Remove the install PG 17 command once we do that in installer script - name: Run installer script to setup voyager run: | cd installer_scripts yes | ./install-yb-voyager --install-from-local-source --only-pg-support + sudo apt-get -y install postgresql-17 + echo "/usr/lib/postgresql/17/bin" >> "$GITHUB_PATH" env: ON_INSTALLER_ERROR_OUTPUT_LOG: Y - name: Test PostgreSQL Connection run: | - psql "postgresql://postgres:secret@127.0.0.1:5432/postgres" -c "SELECT version();" + psql "postgresql://postgres:postgres@127.0.0.1:5432/postgres" -c "SELECT version();" - name: Create PostgreSQL user run: | ./migtests/scripts/postgresql/create_pg_user - name: "TEST: Assessment Report Test" - if: always() run: migtests/scripts/run-validate-assessment-report.sh pg/assessment-report-test + + - name: "TEST: Assessment Report Test (Schema list UQC)" + run: migtests/scripts/run-validate-assessment-report.sh pg/assessment-report-test-uqc - name: "TEST: analyze-schema" - if: always() + if: ${{ !cancelled() }} run: migtests/tests/analyze-schema/run-analyze-schema-test - name: Run import data file tests on different YugabyteDB versions @@ -85,15 +90,17 @@ jobs: GCS_REFRESH_TOKEN: ${{ secrets.PGUPTA_GCS_REFRESH_TOKEN }} AWS_ACCESS_KEY_ID: ${{ secrets.RAHULB_S3_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.RAHULB_S3_SECRET_ACCESS_KEY }} - if: always() + if: ${{ !cancelled() }} run: | - versions=("2.20.5.0-b72" 
"2.21.1.0-b271" "2024.1.1.0-b137") + versions=("2024.2.0.0-b145" "2.20.8.0-b53" "2024.1.3.1-b8" "2.23.1.0-b220") for version in "${versions[@]}"; do echo "Running tests on version $version" echo "Start YugabyteDB cluster" - docker pull yugabytedb/yugabyte:$version - VERSION=$version docker compose -f migtests/setup/yb-docker-compose.yaml up -d + docker run -d --name yugabytedb-$version \ + -p7000:7000 -p9000:9000 -p15433:15433 -p5433:5433 -p9042:9042 \ + yugabytedb/yugabyte:$version \ + bin/yugabyted start --background=false --ui=false sleep 20 echo "Test YugabyteDB connection" @@ -114,8 +121,8 @@ jobs: migtests/tests/import-file/run-import-file-test echo "Stop the cluster before the next iteration" - VERSION=$version docker compose -f migtests/setup/yb-docker-compose.yaml down --volumes - docker network prune -f + docker stop yugabytedb-$version + docker remove yugabytedb-$version done shell: bash diff --git a/.github/workflows/mysql-migtests.yml b/.github/workflows/mysql-migtests.yml index 7872bea0f5..1dfda76a62 100644 --- a/.github/workflows/mysql-migtests.yml +++ b/.github/workflows/mysql-migtests.yml @@ -10,7 +10,7 @@ jobs: run-mysql-migration-tests: strategy: matrix: - version: [2.21.1.0-b271, 2024.1.1.0-b137, 2.20.5.0-b72] + version: [2024.2.0.0-b145, 2.20.8.0-b53, 2024.1.3.1-b8, 2.23.1.0-b220] BETA_FAST_DATA_EXPORT: [0, 1] env: BETA_FAST_DATA_EXPORT: ${{ matrix.BETA_FAST_DATA_EXPORT }} @@ -30,6 +30,7 @@ jobs: key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} restore-keys: | ${{ runner.os }}-maven- + - name: Install python3 and psycopg2 run: | sudo apt install -y python3 @@ -59,8 +60,10 @@ jobs: - name: Start YugabyteDB cluster run: | - docker pull yugabytedb/yugabyte:${{ matrix.version }} - VERSION=${{ matrix.version }} docker compose -f migtests/setup/yb-docker-compose.yaml up -d + docker run -d --name yugabytedb \ + -p7000:7000 -p9000:9000 -p15433:15433 -p5433:5433 -p9042:9042 \ + yugabytedb/yugabyte:${{ matrix.version }} \ + bin/yugabyted start 
--background=false --ui=false sleep 20 - name: Test YugabyteDB connection @@ -77,66 +80,67 @@ jobs: psql "postgresql://yugabyte@yb-tserver-n1:5433/yugabyte" -c "SELECT version();" - name: "TEST: mysql-table-list-flags-test (table-list and exclude-table-list)" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/table-list-flags-tests - name: "TEST: mysql-table-list-file-path-test (table-list-file-path and exclude-table-list-file-path)" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/table-list-flags-tests env-file-path-flags.sh - name: "TEST: mysql-sakila" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/sakila - name: "TEST: mysql-datatypes" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/datatypes - name: "TEST: mysql-constraints" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/constraints - name: "TEST: mysql-case-indexes" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/indexes - name: "TEST: mysql-functions" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/functions - name: "TEST: mysql-case-sequences" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/sequences - name: "TEST: mysql-triggers-procedures" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/triggers-procedures - name: "TEST: mysql-case-views" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/views - name: "TEST: mysql-partitions" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/partitions - name: "TEST: mysql-sample-chinook" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/chinook - name: "TEST: mysql-misc-tests" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/misc-tests - name: "TEST: 
mysql-case-sensitivity-reserved-words" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh mysql/case-sensitivity-reserved-words # Placeholder for now so that a basic test can run - name: Create the live migration user + if: always() run: | mysql -uroot -proot -e 'GRANT SELECT, RELOAD, SHOW DATABASES, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'ybvoyager'@'127.0.0.1';' - name: "TEST: mysql-live-migration-test" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/live-migration-run-test.sh mysql/basic-live-test diff --git a/.github/workflows/pg-migtests.yml b/.github/workflows/pg-13-migtests.yml similarity index 71% rename from .github/workflows/pg-migtests.yml rename to .github/workflows/pg-13-migtests.yml index 6f5af1d821..d6b03ec165 100644 --- a/.github/workflows/pg-migtests.yml +++ b/.github/workflows/pg-13-migtests.yml @@ -7,11 +7,16 @@ on: branches: ['main'] jobs: - run-pg-migration-tests: + run-pg-13-migration-tests: strategy: matrix: - version: [2.21.1.0-b271, 2024.1.1.0-b137, 2.20.5.0-b72] + version: [2024.2.0.0-b145, 2.20.8.0-b53, 2024.1.3.1-b8, 2.23.1.0-b220] BETA_FAST_DATA_EXPORT: [0, 1] + test_group: + - offline + - live_basic + - live_advanced + env: BETA_FAST_DATA_EXPORT: ${{ matrix.BETA_FAST_DATA_EXPORT }} runs-on: ubuntu-22.04 @@ -19,7 +24,7 @@ jobs: postgres: image: postgres:13 env: - POSTGRES_PASSWORD: secret + POSTGRES_PASSWORD: postgres # Set health checks to wait until postgres has started options: >- --health-cmd pg_isready @@ -38,6 +43,7 @@ jobs: distribution: "temurin" java-version: "17" check-latest: true + - name: Cache local Maven repository uses: actions/cache@v3 with: @@ -45,12 +51,14 @@ jobs: key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} restore-keys: | ${{ runner.os }}-maven- - - name: "Enable postgres with wal_level as logical" + + - name: "Enable postgres with wal_level as logical and install postgis" run: | docker exec ${{ job.services.postgres.id }} sh -c "echo 'wal_level=logical' 
>> /var/lib/postgresql/data/postgresql.conf" + docker exec ${{ job.services.postgres.id }} sh -c "apt-get update && apt-get install -y postgresql-13-postgis postgresql-13-postgis-3" docker restart ${{ job.services.postgres.id }} sleep 10 - # if: matrix.BETA_FAST_DATA_EXPORT == 1 + - name: Install python3 and psycopg2 run: | sudo apt install -y python3 @@ -62,16 +70,18 @@ jobs: cd installer_scripts yes | ./install-yb-voyager --install-from-local-source --only-pg-support sudo rm /usr/bin/pg_dump - sudo ln -s /usr/lib/postgresql/16/bin/pg_dump /usr/bin/pg_dump + sudo ln -s /usr/lib/postgresql/17/bin/pg_dump /usr/bin/pg_dump sudo rm /usr/bin/pg_restore - sudo ln -s /usr/lib/postgresql/16/bin/pg_restore /usr/bin/pg_restore + sudo ln -s /usr/lib/postgresql/17/bin/pg_restore /usr/bin/pg_restore pg_dump --version + pg_restore --version + psql --version env: ON_INSTALLER_ERROR_OUTPUT_LOG: Y - name: Test PostgreSQL Connection run: | - psql "postgresql://postgres:secret@127.0.0.1:5432/postgres" -c "SELECT version();" + psql "postgresql://postgres:postgres@127.0.0.1:5432/postgres" -c "SELECT version();" - name: Create PostgreSQL user run: | @@ -79,8 +89,10 @@ jobs: - name: Start YugabyteDB cluster run: | - docker pull yugabytedb/yugabyte:${{ matrix.version }} - VERSION=${{ matrix.version }} docker compose -f migtests/setup/yb-docker-compose.yaml up -d + docker run -d --name yugabytedb \ + -p7000:7000 -p9000:9000 -p15433:15433 -p5433:5433 -p9042:9042 \ + yugabytedb/yugabyte:${{ matrix.version }} \ + bin/yugabyted start --background=false --ui=false sleep 20 - name: Test YugabyteDB connection @@ -98,120 +110,108 @@ jobs: psql "postgresql://yugabyte@yb-tserver-n1:5433/yugabyte" -c "SELECT version();" - name: "TEST: pg-table-list-flags-test (table-list and exclude-table-list)" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/table-list-flags-tests - name: "TEST: pg-table-list-file-path-test (table-list-file-path and 
exclude-table-list-file-path)" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/table-list-flags-tests env-file-path-flags.sh - name: "TEST: pg-case-sensitivity-single-table" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test-export-data.sh pg/case-sensitivity-single-table - - name: "TEST: pg-dvdrental" - if: always() - run: migtests/scripts/run-test.sh pg/dvdrental - - name: "TEST: pg-datatypes" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/datatypes - name: "TEST: pg-constraints" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/constraints - name: "TEST: pg-sequences" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/sequences - name: "TEST: pg-indexes" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/indexes - name: "TEST: pg-partitions" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/partitions - name: "TEST: pg-partitions with (table-list)" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: EXPORT_TABLE_LIST='customers,sales,emp,p2.boston,p2.london,p2.sydney,range_columns_partition_test,sales_region,test_partitions_sequences' migtests/scripts/run-test.sh pg/partitions # Broken for v2.15 and v2.16: https://github.com/yugabyte/yugabyte-db/issues/14529 # Fixed in 2.17.1.0-b368 - name: "TEST: pg-partitions-with-indexes" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/partitions-with-indexes - name: "TEST: pg-views-and-rules" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh 
pg/views-and-rules - name: "TEST: pg-misc-objects-1 (Types, case-sensitive-table-name, Domain)" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/misc-objects-1 - name: "TEST: pg-misc-objects-2 (Aggregates, Procedures, triggers, functions, extensions, inline comments)" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/misc-objects-2 - name: "TEST: pg-dependent-ddls" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/dependent-ddls - name: "TEST: pg-multiple-schemas" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/multiple-schemas - - name: "Set up gcp environment" - env: - GCS_CLIENT_ID: ${{ secrets.PGUPTA_GCS_CLIENT_ID }} - GCS_CLIENT_SECRET: ${{ secrets.PGUPTA_GCS_CLIENT_SECRET }} - GCS_REFRESH_TOKEN: ${{ secrets.PGUPTA_GCS_REFRESH_TOKEN }} - if: always() - run: migtests/scripts/gcs/create_gcs_credentials_file - - name: "TEST: pg-codependent-schemas" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/codependent-schemas - name: "TEST: pg-sample-schema-emp" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/sample-employee - name: "TEST: pg-hasura-ecommerce" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'offline' }} run: migtests/scripts/run-test.sh pg/hasura-ecommerce + - name: "TEST: pg-case-sensitivity-reserved-words-offline" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/case-sensitivity-reserved-words + - name: "TEST: pg-basic-non-public-live-migration-test" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'live_basic' }} run: migtests/scripts/live-migration-run-test.sh pg/basic-non-public-live-test - - - name: 
"TEST: pg-unique-key-conflicts-test" - if: always() - run: migtests/scripts/live-migration-fallf-run-test.sh pg/unique-key-conflicts-test - - # case sensitive table names are not yet supported in live migration, to restricting test only to a few tables. - - name: "TEST: pg-live-migration-multiple-schemas" - if: always() - run: EXPORT_TABLE_LIST="ext_test,tt,audit,recipients,session_log,schema2.ext_test,schema2.tt,schema2.audit,schema2.recipients,schema2.session_log" migtests/scripts/live-migration-run-test.sh pg/multiple-schemas - name: "TEST: pg-basic-public-fall-forward-test" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'live_basic' }} run: migtests/scripts/live-migration-fallf-run-test.sh pg/basic-public-live-test # - name: "TEST: pg-basic-non-public-fall-back-test" - # if: always() # run: migtests/scripts/live-migration-fallb-run-test.sh pg/basic-non-public-live-test - name: "TEST: pg-datatypes-fall-back-test" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'live_basic' }} run: migtests/scripts/live-migration-fallb-run-test.sh pg/datatypes + # case sensitive table names are not yet supported in live migration, to restricting test only to a few tables. 
+ - name: "TEST: pg-live-migration-multiple-schemas" + if: ${{ !cancelled() && matrix.test_group == 'live_advanced' }} + run: EXPORT_TABLE_LIST="ext_test,tt,audit,recipients,session_log,schema2.ext_test,schema2.tt,schema2.audit,schema2.recipients,schema2.session_log" migtests/scripts/live-migration-run-test.sh pg/multiple-schemas + + - name: "TEST: pg-unique-key-conflicts-test" + if: ${{ !cancelled() && matrix.test_group == 'live_advanced' }} + run: migtests/scripts/live-migration-fallf-run-test.sh pg/unique-key-conflicts-test + - name: "TEST: pg-live-migration-partitions-fall-forward" - if: always() + if: ${{ !cancelled() && matrix.test_group == 'live_advanced' }} run: migtests/scripts/live-migration-fallf-run-test.sh pg/partitions - - name: "TEST: pg-case-sensitivity-reserved-words-offline" - if: always() - run: migtests/scripts/run-test.sh pg/case-sensitivity-reserved-words diff --git a/.github/workflows/pg-17-migtests.yml b/.github/workflows/pg-17-migtests.yml new file mode 100644 index 0000000000..27aacc0c68 --- /dev/null +++ b/.github/workflows/pg-17-migtests.yml @@ -0,0 +1,249 @@ +name: "PG 17: Migration Tests" + +on: + push: + branches: ['main', '*.*-dev', '*.*.*-dev'] + pull_request: + branches: ['main'] + +jobs: + run-pg-17-migration-tests: + strategy: + matrix: + version: [2024.2.0.0-b145, 2.20.8.0-b53, 2024.1.3.1-b8, 2.23.1.0-b220] + BETA_FAST_DATA_EXPORT: [0, 1] + test_group: + - offline + - live_basic + - live_advanced + + env: + BETA_FAST_DATA_EXPORT: ${{ matrix.BETA_FAST_DATA_EXPORT }} + runs-on: ubuntu-22.04 + services: + postgres: + image: postgres:17 + env: + POSTGRES_PASSWORD: postgres + # Set health checks to wait until postgres has started + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + # Maps tcp port 5432 on service container to the host + - 5432:5432 + + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-java@v3 + # https://github.com/actions/setup-java + with: 
+ distribution: "temurin" + java-version: "17" + check-latest: true + + - name: Cache local Maven repository + uses: actions/cache@v3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + + - name: "Enable postgres with wal_level as logical and install postgis" + run: | + docker exec ${{ job.services.postgres.id }} sh -c "echo 'wal_level=logical' >> /var/lib/postgresql/data/postgresql.conf" + docker exec ${{ job.services.postgres.id }} sh -c "apt-get update && apt-get install -y postgresql-17-postgis postgresql-17-postgis-3" + docker restart ${{ job.services.postgres.id }} + sleep 10 + + - name: Install python3 and psycopg2 + run: | + sudo apt install -y python3 + sudo apt install -y libpq-dev + sudo apt install python3-psycopg2 + + - name: Run installer script to setup voyager + run: | + cd installer_scripts + yes | ./install-yb-voyager --install-from-local-source --only-pg-support + sudo rm /usr/bin/pg_dump + sudo ln -s /usr/lib/postgresql/17/bin/pg_dump /usr/bin/pg_dump + sudo rm /usr/bin/pg_restore + sudo ln -s /usr/lib/postgresql/17/bin/pg_restore /usr/bin/pg_restore + pg_dump --version + pg_restore --version + psql --version + env: + ON_INSTALLER_ERROR_OUTPUT_LOG: Y + + - name: Test PostgreSQL Connection + run: | + psql "postgresql://postgres:postgres@127.0.0.1:5432/postgres" -c "SELECT version();" + + - name: Create PostgreSQL user + run: | + ./migtests/scripts/postgresql/create_pg_user + + - name: Start YugabyteDB cluster + run: | + docker run -d --name yugabytedb \ + -p7000:7000 -p9000:9000 -p15433:15433 -p5433:5433 -p9042:9042 \ + yugabytedb/yugabyte:${{ matrix.version }} \ + bin/yugabyted start --background=false --ui=false + sleep 20 + + - name: Test YugabyteDB connection + run: | + psql "postgresql://yugabyte:@127.0.0.1:5433/yugabyte" -c "SELECT version();" + + - name: Create YugabyteDB user + run: | + ./migtests/scripts/yugabytedb/create_yb_user + + - name: Enable 
yb-tserver-n1 and yb-master-n1 name resolution + run: | + echo "127.0.0.1 yb-tserver-n1" | sudo tee -a /etc/hosts + echo "127.0.0.1 yb-master-n1" | sudo tee -a /etc/hosts + psql "postgresql://yugabyte@yb-tserver-n1:5433/yugabyte" -c "SELECT version();" + + - name: "TEST: PG sample schemas (sakila)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-schema-migration.sh pg/sakila + + - name: "TEST: PG sample schemas (osm)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-schema-migration.sh pg/osm + + - name: "TEST: PG sample schemas (omnibus)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-schema-migration.sh pg/omnibus + + - name: "TEST: PG sample schemas (adventureworks)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-schema-migration.sh pg/adventureworks + + - name: "TEST: PG sample schemas (sample-is)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-schema-migration.sh pg/sample-is + + - name: "TEST: PG sample schemas (pgtbrus)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-schema-migration.sh pg/pgtbrus + + - name: "TEST: PG sample schemas (stackexchange)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-schema-migration.sh pg/stackexchange + + - name: "TEST: pg-table-list-flags-test (table-list and exclude-table-list)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/table-list-flags-tests + + - name: "TEST: pg-table-list-file-path-test (table-list-file-path and exclude-table-list-file-path)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/table-list-flags-tests env-file-path-flags.sh + + - name: "TEST: pg-case-sensitivity-single-table" + if: ${{ !cancelled() && matrix.test_group == 
'offline' }} + run: migtests/scripts/run-test-export-data.sh pg/case-sensitivity-single-table + + - name: "TEST: pg-dvdrental" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/dvdrental + + - name: "TEST: pg-datatypes" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/datatypes + + - name: "TEST: pg-constraints" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/constraints + + - name: "TEST: pg-sequences" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/sequences + + - name: "TEST: pg-indexes" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/indexes + + - name: "TEST: pg-partitions" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/partitions + + - name: "TEST: pg-partitions with (table-list)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: EXPORT_TABLE_LIST='customers,sales,emp,p2.boston,p2.london,p2.sydney,range_columns_partition_test,sales_region,test_partitions_sequences' migtests/scripts/run-test.sh pg/partitions + + # Broken for v2.15 and v2.16: https://github.com/yugabyte/yugabyte-db/issues/14529 + # Fixed in 2.17.1.0-b368 + - name: "TEST: pg-partitions-with-indexes" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/partitions-with-indexes + + - name: "TEST: pg-views-and-rules" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/views-and-rules + + - name: "TEST: pg-misc-objects-1 (Types, case-sensitive-table-name, Domain)" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/misc-objects-1 + + - name: "TEST: pg-misc-objects-2 (Aggregates, Procedures, triggers, functions, extensions, inline comments)" + if: 
${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/misc-objects-2 + + - name: "TEST: pg-dependent-ddls" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/dependent-ddls + + - name: "TEST: pg-multiple-schemas" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/multiple-schemas + + - name: "TEST: pg-codependent-schemas" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/codependent-schemas + + - name: "TEST: pg-sample-schema-emp" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/sample-employee + + - name: "TEST: pg-hasura-ecommerce" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/hasura-ecommerce + + - name: "TEST: pg-case-sensitivity-reserved-words-offline" + if: ${{ !cancelled() && matrix.test_group == 'offline' }} + run: migtests/scripts/run-test.sh pg/case-sensitivity-reserved-words + + - name: "TEST: pg-basic-non-public-live-migration-test" + if: ${{ !cancelled() && matrix.test_group == 'live_basic' }} + run: migtests/scripts/live-migration-run-test.sh pg/basic-non-public-live-test + + - name: "TEST: pg-basic-public-fall-forward-test" + if: ${{ !cancelled() && matrix.test_group == 'live_basic' }} + run: migtests/scripts/live-migration-fallf-run-test.sh pg/basic-public-live-test + + # - name: "TEST: pg-basic-non-public-fall-back-test" + # run: migtests/scripts/live-migration-fallb-run-test.sh pg/basic-non-public-live-test + + - name: "TEST: pg-datatypes-fall-back-test" + if: ${{ !cancelled() && matrix.test_group == 'live_basic' }} + run: migtests/scripts/live-migration-fallb-run-test.sh pg/datatypes + + # case sensitive table names are not yet supported in live migration, so restricting the test to only a few tables. 
+ - name: "TEST: pg-live-migration-multiple-schemas" + if: ${{ !cancelled() && matrix.test_group == 'live_advanced' }} + run: EXPORT_TABLE_LIST="ext_test,tt,audit,recipients,session_log,schema2.ext_test,schema2.tt,schema2.audit,schema2.recipients,schema2.session_log" migtests/scripts/live-migration-run-test.sh pg/multiple-schemas + + - name: "TEST: pg-unique-key-conflicts-test" + if: ${{ !cancelled() && matrix.test_group == 'live_advanced' }} + run: migtests/scripts/live-migration-fallf-run-test.sh pg/unique-key-conflicts-test + + - name: "TEST: pg-live-migration-partitions-fall-forward" + if: ${{ !cancelled() && matrix.test_group == 'live_advanced' }} + run: migtests/scripts/live-migration-fallf-run-test.sh pg/partitions + diff --git a/.github/workflows/pg-9-migtests.yml b/.github/workflows/pg-9-migtests.yml index 622ba4134a..8fa3251d1b 100644 --- a/.github/workflows/pg-9-migtests.yml +++ b/.github/workflows/pg-9-migtests.yml @@ -7,10 +7,10 @@ on: branches: ['main'] jobs: - run-pg-migration-tests: + run-pg-9-migration-tests: strategy: matrix: - version: [2.21.1.0-b271] + version: [2024.2.0.0-b145] BETA_FAST_DATA_EXPORT: [0, 1] env: BETA_FAST_DATA_EXPORT: ${{ matrix.BETA_FAST_DATA_EXPORT }} @@ -19,7 +19,7 @@ jobs: postgres: image: postgres:9 env: - POSTGRES_PASSWORD: secret + POSTGRES_PASSWORD: postgres # Set health checks to wait until postgres has started options: >- --health-cmd pg_isready @@ -45,6 +45,7 @@ jobs: key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} restore-keys: | ${{ runner.os }}-maven- + - name: "Enable postgres with wal_level as logical" run: | docker exec ${{ job.services.postgres.id }} sh -c "echo 'wal_level=logical' >> /var/lib/postgresql/data/postgresql.conf" @@ -62,16 +63,18 @@ jobs: cd installer_scripts yes | ./install-yb-voyager --install-from-local-source --only-pg-support sudo rm /usr/bin/pg_dump - sudo ln -s /usr/lib/postgresql/16/bin/pg_dump /usr/bin/pg_dump + sudo ln -s /usr/lib/postgresql/17/bin/pg_dump /usr/bin/pg_dump 
sudo rm /usr/bin/pg_restore - sudo ln -s /usr/lib/postgresql/16/bin/pg_restore /usr/bin/pg_restore + sudo ln -s /usr/lib/postgresql/17/bin/pg_restore /usr/bin/pg_restore pg_dump --version + pg_restore --version + psql --version env: ON_INSTALLER_ERROR_OUTPUT_LOG: Y - name: Test PostgreSQL Connection run: | - psql "postgresql://postgres:secret@127.0.0.1:5432/postgres" -c "SELECT version();" + psql "postgresql://postgres:postgres@127.0.0.1:5432/postgres" -c "SELECT version();" - name: Create PostgreSQL user run: | @@ -79,8 +82,10 @@ jobs: - name: Start YugabyteDB cluster run: | - docker pull yugabytedb/yugabyte:${{ matrix.version }} - VERSION=${{ matrix.version }} docker compose -f migtests/setup/yb-docker-compose.yaml up -d + docker run -d --name yugabytedb \ + -p7000:7000 -p9000:9000 -p15433:15433 -p5433:5433 -p9042:9042 \ + yugabytedb/yugabyte:${{ matrix.version }} \ + bin/yugabyted start --background=false --ui=false sleep 20 - name: Test YugabyteDB connection @@ -98,16 +103,13 @@ jobs: psql "postgresql://yugabyte@yb-tserver-n1:5433/yugabyte" -c "SELECT version();" - name: "TEST: pg-case-sensitivity-single-table" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test-export-data.sh pg/case-sensitivity-single-table - name: "TEST: pg-datatypes" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh pg/datatypes - name: "TEST: pg-constraints" - if: always() + if: ${{ !cancelled() }} run: migtests/scripts/run-test.sh pg/constraints - - - diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 11b98494e0..dccd04631e 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -4,6 +4,126 @@ Included here are the release notes for the [YugabyteDB Voyager](https://docs.yugabyte.com/preview/migrate/) v1 release series. Content will be added as new notable features and changes are available in the patch releases of the YugabyteDB v1 series. 
+## v1.8.9 - January 14, 2025
+
+### New Features
+- Implemented a new algorithm for migration complexity determination that accounts for all potential issues, including unsupported query constructs, PL/pgSQL objects, and incompatible data types.
+- Introduced migration complexity explanations in the PostgreSQL assessment report, summarising high-impact issues and illustrating how the overall complexity level is determined.
+
+### Enhancements
+- Enhanced Assessment and Schema Analysis reports to detect unsupported PostgreSQL features from PG 12 up to PG 17, including:
+    - Regexp functions (`regexp_count`, `regexp_instr`, `regexp_like`)
+    - Security Invoker Views
+    - JSON **constructor** and JSON **Query functions**
+    - IS_JSON predicate clauses (`IS_JSON`, `IS JSON SCALAR`, `IS JSON OBJECT`, `IS JSON ARRAY`)
+    - Aggregate functions like `anyvalue`, `range_agg`, `range_intersect_agg`
+    - COPY command syntax such as `COPY FROM ... WHERE` and `COPY ... ON_ERROR`
+    - **Multirange datatypes** like `int4multirange`, `int8multirange`, `datemultirange`, etc.
+    - `FETCH FIRST … WITH TIES` subclause in `SELECT` statement
+    - Foreign Key referencing a partitioned table
+    - JSONB Subscripting in DML, DDL or PL/PGSQL
+    - `UNIQUE NULLS NOT DISTINCT` in `CREATE/ALTER TABLE` statement
+    - The **deterministic** attribute in `CREATE COLLATION`
+    - `MERGE` statements
+
+### Bug Fixes
+- Fixed an [issue](https://github.com/yugabyte/yb-voyager/issues/2034) where import data failed for tables whose datafile paths exceeded 250 characters. The fix is backward compatible, allowing migrations started with an older voyager version to continue seamlessly.
+- Fixed an issue where logic for detecting unsupported PostgreSQL versions was giving false positives.
+
+
+## v1.8.8 - December 24, 2024
+
+### Enhancements
+
+- Assessment and Schema Analysis Reports
+    - You can now specify the target version of YugabyteDB when running `assess-migration` and `analyze-schema`.
Specify the version using the flag `--target-db-version`. The default is the latest stable release (currently 2024.2.0.0).
+    - Assessment and schema analysis now detect and report the presence of advisory locks, XML functions, and system columns in DDLs.
+    - Assessment and schema analysis now detect the presence of large objects (and their functions) in DDLs/DMLs.
+    - In the Schema analysis report (html/text), changed the following field names to improve readability: Invalid Count to Objects with Issues; Total Count to Total Objects; and Valid Count to Objects without Issues. The logic determining when an object is considered to have issues or not has also been improved.
+    - Stop reporting Unlogged tables as an issue in assessment and schema analysis reports by default, as UNLOGGED no longer results in a syntax error in YugabyteDB v2024.2.0.0.
+    - Stop reporting ALTER PARTITIONED TABLE ADD PRIMARY KEY as an issue in assessment and schema analysis reports, as the issue has been fixed in YugabyteDB v2024.1.0.0 and later.
+    - In the assessment report, only statements from `pg_stat_statements` that belong to the schemas provided by the user will be processed for detecting and reporting issues.
+
+- Data Migration
+    - `import data file` and `import data to source replica` now accept a new flag `truncate-tables` (in addition to `import data`), which, when used with `start-clean true`, truncates all the tables in the target/source-replica database before importing data into the tables.
+
+- Miscellaneous
+    - Enhanced guardrail checks in `import-schema` for YugabyteDB Aeon.
+
+
+### Bug Fixes
+- Skip Unsupported Query Constructs detection if `pg_stat_statements` is not loaded via `shared_preload_libraries`.
+- Prevent Voyager from panicking/erroring out in case of `analyze-schema` and `import data` when `export-dir` is empty.
+ + +## v1.8.7 - December 10, 2024 + +### New Features + +- Introduced a framework in the `assess-migration` and `analyze-schema` commands to accept the target database version (`--target-db-version` flag) as input and use it for reporting issues not supported in that target version for the source schema. + +### Enhancements + +- Improved permission grant script (`yb-voyager-pg-grant-migration-permissions.sql`) by internally detecting table owners, eliminating the need to specify the `original_owner_of_tables` flag. +- Enhanced reporting of **Unsupported Query Constructs** in the `assess-migration` command by filtering queries to include only those that match user-specified schemas, provided schema information is present in the query. +- Enhanced the `assess-migration` and `analyze-schema` commands to report issues in Functions or Procedures for variables declared with reference types (%TYPE) in the **Unsupported PL/pgSQL Objects** section. +- Added support to report DDL issues present in the PL/pgSQL blocks of objects listed in the **Unsupported PL/pgSQL Objects** section of the `assess-migration` and `analyze-schema` commands. +- Allow yb-voyager upgrades during migration from the recent breaking release (v1.8.5) to later versions. +- Modified the internal HTTP port to dynamically use an available free port instead of defaulting to 8080, avoiding conflicts with commonly used services. +- Added a guardrail check to the `assess-migration` command to verify that the `pg_stat_statements` extension is properly loaded in the source database. + +### Bug fixes + +- Fixed an [issue](https://github.com/yugabyte/yb-voyager/issues/1895) where NOT VALID constraints in the schema could cause constraint violation errors during data imports; these constraints are now created during the post-snapshot import phase. 
+- Fixed formatting issues in the assessment HTML report, where extra spaces or characters appeared after the **Unsupported PL/pgSQL Objects** heading, depending on the browser used for viewing. +- Fixed an [issue](https://github.com/yugabyte/yb-voyager/issues/1913) of segmentation faults when certain commands are executed before migration initialization. + +## v1.8.6 - November 26, 2024 + +### New Features + +- Unsupported PL/pgSQL objects detection. Migration assessment and schema analysis commands can now detect and report SQL features and constructs in PL/pgSQL objects in the source schema that are not supported by YugabyteDB. This includes detecting advisory locks, system columns, and XML functions. Voyager reports individual queries in these objects that contain unsupported constructs, such as queries in PL/pgSQL blocks for functions and procedures, or select statements in views and materialized views. + +### Enhancements + +- Using the arguments `--table-list` and `--exclude-table-list` in guardrails now checks for PostgreSQL export to determine which tables require permission checks. +- Added a check for Java as a dependency in guardrails for PostgreSQL export during live migration. +- Added check to verify if [pg_stat_statements](https://docs.yugabyte.com/preview/explore/ysql-language-features/pg-extensions/extension-pgstatstatements/) is in a schema not included in the specified `schema_list` and if the migration user has access to queries in the pg_stat_statements view. This is part of the guardrails for assess-migration for PostgreSQL. +- Introduced the `--version` flag in the voyager installer script, which can be used to specify the version to install. +- Added argument [--truncate-tables](https://docs.yugabyte.com/preview/yugabyte-voyager/reference/data-migration/import-data/#arguments) to import data to target for truncating tables, applicable only when --start-clean is true. 
+- Added support in the assess-migration command to detect the `XMLTABLE()` function under unsupported query constructs. +- Added support for reporting unsupported indexes on some data types, such as daterange, int4range, int8range, tsrange, tstzrange, numrange, and interval, in analyze-schema and assess-migration. +- Added support for reporting unsupported primary and unique key constraints on various data types in assess-migration and analyze-schema. + +### Bug fixes + +- Fixed an [issue](https://github.com/yugabyte/yb-voyager/issues/1920) where export-data errors out if background metadata queries (count*) are still running after pg_dump completes. +- Fixed a bug where the assess-migration command fails when gathering metadata for unsupported query constructs if the pg_stat_statements extension was installed in a non-public schema. +- Fixed nil pointer exceptions and index-out-of-range issues when running export data status and get data-migration-report commands before export data is properly started. +- Fixed a bug in export data status command for accurate status reporting of partition tables during PostgreSQL data export. + +## v1.8.5 - November 12, 2024 + +### Enhancements + +- The guardrail checks to validate source/target database permissions, verify binary dependencies, and check database version compatibility for PostgreSQL in all voyager commands are now enabled by default. +- UI/UX improvements in the PostgreSQL permission grant script (`yb-voyager-pg-grant-migration-permissions.sql`) and new checks are added for replication slots, foreign keys, and triggers in PostgreSQL guardrails. +- Object names are scrollable in the analyze schema HTML report for improved navigation. +- Added constraint names and their corresponding table names when reporting unsupported features related to deferrable and exclusion constraints. 
+- Added reporting for the REFERENCING clause for triggers and BEFORE ROW triggers on partitioned tables in the analyze-schema and assess-migration reports. +- Added documentation links for unsupported query constructs in the assessment report. +- Standardized the format of data sent to the yugabyted control plane via the assess-migration command, ensuring consistent presentation across various sections of the report, such as Unsupported Features, Unsupported Datatypes, and Unsupported Query Constructs. + +### Bug fixes + +- Fixed the import-schema DDL parsing issue for functions and procedures, where extra spaces before the DDL caused it to be treated as normal DDL, preventing the PLPGSQL parsing logic from triggering. +- Fixed an issue which resulted in "token too long" errors in export-data-from-target when log level was set to DEBUG. + +### Known issues + +- The [assess-migration](https://docs.yugabyte.com/preview/yugabyte-voyager/reference/assess-migration/) command will fail if the [pg_stat_statements](https://docs.yugabyte.com/preview/explore/ysql-language-features/pg-extensions/extension-pgstatstatements/) extension is created in a non-public schema, due to the "Unsupported Query Constructs" feature. +To bypass this issue, set the environment variable `REPORT_UNSUPPORTED_QUERY_CONSTRUCTS=false`, which disables the "Unsupported Query Constructs" feature and proceeds with the command execution. 
+ ## v1.8.4 - October 29, 2024 ### New Features diff --git a/guardrails-scripts/yb-voyager-pg-grant-migration-permissions.sql b/guardrails-scripts/yb-voyager-pg-grant-migration-permissions.sql index ec5c08f4c1..81efa5c60c 100644 --- a/guardrails-scripts/yb-voyager-pg-grant-migration-permissions.sql +++ b/guardrails-scripts/yb-voyager-pg-grant-migration-permissions.sql @@ -1,8 +1,8 @@ --- How to use the script: -- Run the script with psql command line tool, passing the necessary parameters: ---- psql -h -d -U -v voyager_user='' -v schema_list='' -v is_live_migration= -v is_live_migration_fall_back= -v replication_group='' -v original_owner_of_tables='' -f +--- psql -h -d -U -v voyager_user='' -v schema_list='' -v is_live_migration= -v is_live_migration_fall_back= -v replication_group='' -f --- Example: ---- psql -h -d -U -v voyager_user='ybvoyager' -v schema_list='schema1,public,schema2' -v is_live_migration=1 -v is_live_migration_fall_back=0 -v replication_group='replication_group' -v original_owner_of_tables='postgres' -f /home/ubuntu/yb-voyager-pg-grant-migration-permissions.sql +--- psql -h -d -U -v voyager_user='ybvoyager' -v schema_list='schema1,public,schema2' -v is_live_migration=1 -v is_live_migration_fall_back=0 -v replication_group='replication_group' -f /home/ubuntu/yb-voyager-pg-grant-migration-permissions.sql --- Parameters: --- : The hostname of the PostgreSQL server. --- : The name of the database to connect to. @@ -12,7 +12,6 @@ --- : A flag indicating if this is a live migration (1 for true, 0 for false). If set to 0 then the script will check for permissions for an offline migration. --- : A flag indicating if this is a live migration with fallback (1 for true, 0 for false). If set to 0 then the script will detect permissions for live migration with fall-forward. Should only be set to 1 when is_live_migration is also set to 1. Does not need to be provided unless is_live_migration is set to 1. --- : The name of the replication group to be created. 
Not needed for offline migration. ---- : The original owner of the tables to be added to the replication group. Not needed for offline migration. \echo '' \echo '--- Checking Variables ---' @@ -41,7 +40,7 @@ \q \endif --- If live migration is enabled, then is_live_migration_fall_back, replication_group and original_owner_of_tables should be provided +-- If live migration is enabled, then is_live_migration_fall_back, replication_group should be provided \if :is_live_migration -- Check if is_live_migration_fall_back is provided @@ -59,14 +58,6 @@ \echo 'Error: replication_group flag is not provided!' \q \endif - - -- Check if original_owner_of_tables is provided - \if :{?original_owner_of_tables} - \echo 'Original owner of tables is provided: ':original_owner_of_tables - \else - \echo 'Error: original_owner_of_tables flag is not provided!' - \q - \endif \endif -- If live migration fallback is provided and enabled, then is_live_migration should be enabled @@ -165,7 +156,31 @@ GRANT pg_read_all_stats to :voyager_user; -- Add the original owner of the tables to the group \echo '' \echo '--- Adding Original Owner to Replication Group ---' - GRANT :replication_group TO :original_owner_of_tables; + DO $$ + DECLARE + tableowner TEXT; + schema_list TEXT[] := string_to_array(current_setting('myvars.schema_list'), ','); -- Convert the schema list to an array + replication_group TEXT := current_setting('myvars.replication_group'); -- Get the replication group from settings + BEGIN + -- Generate the GRANT statements and execute them dynamically + FOR tableowner IN + SELECT DISTINCT t.tableowner + FROM pg_catalog.pg_tables t + WHERE t.schemaname = ANY (schema_list) -- Use the schema_list variable + AND NOT EXISTS ( + SELECT 1 + FROM pg_roles r + WHERE r.rolname = t.tableowner + AND pg_has_role(t.tableowner, replication_group, 'USAGE') -- Use the replication_group variable + ) + LOOP + -- Display the GRANT statement + RAISE NOTICE 'Granting role: GRANT % TO %;', replication_group, 
tableowner; + + -- Execute the GRANT statement + EXECUTE format('GRANT %I TO %I;', replication_group, tableowner); + END LOOP; + END $$; -- Add the user ybvoyager to the replication group \echo '' diff --git a/installer_scripts/install-voyager-airgapped.sh b/installer_scripts/install-voyager-airgapped.sh index 886ed06148..3a1a1c49ae 100644 --- a/installer_scripts/install-voyager-airgapped.sh +++ b/installer_scripts/install-voyager-airgapped.sh @@ -150,8 +150,12 @@ install_perl_module() { local requirement_type="$2" local required_version="$3" local package="$4" - - echo "Installing module $module_name..." + + # Check if the module is already installed and meets the version requirements + check_perl_module_version "$module_name" "$requirement_type" "$required_version" "true" + if [[ $? -eq 0 ]]; then + return + fi # Extract the package tar -xzvf "$package" 1>&2 || { echo "Error: Failed to extract $package"; exit 1; } @@ -183,35 +187,56 @@ install_perl_module() { # Return to the original directory cd .. - # Verification and version check + # Verification of the installed module + check_perl_module_version "$module_name" "$requirement_type" "$required_version" "false" + if [[ $? -ne 0 ]]; then + exit 1 + fi +} + +check_perl_module_version() { + local module_name="$1" + local requirement_type="$2" + local required_version="$3" + local check_only="$4" # If "true", suppress error messages and exit silently + + # Get installed version + local installed_version installed_version=$(perl -M"$module_name" -e 'print $'"$module_name"'::VERSION' 2> /dev/null) - + if [[ -z "$installed_version" ]]; then - echo "Error: $module_name could not be loaded or found." - exit 1 + if [[ "$check_only" != "true" ]]; then + echo "Error: $module_name could not be loaded or found." 
+ fi + return 1 fi # Version comparison based on requirement type if [[ "$requirement_type" == "min" ]]; then # Check if installed version is at least the required version - if [[ $(echo -e "$installed_version\n$required_version" | sort -V | head -n1) != "$required_version" ]]; then + if [[ $(echo -e "$installed_version\n$required_version" | sort -V | head -n1) == "$required_version" ]]; then + return 0 + fi + if [[ "$check_only" != "true" ]]; then echo "Error: Installed version of $module_name ($installed_version) does not meet the minimum required version ($required_version)." - exit 1 fi + return 1 elif [[ "$requirement_type" == "exact" ]]; then # Check if installed version matches the required version exactly - if [[ "$installed_version" != "$required_version" ]]; then + if [[ "$installed_version" == "$required_version" ]]; then + return 0 + fi + if [[ "$check_only" != "true" ]]; then echo "Error: Installed version of $module_name ($installed_version) does not match the exact required version ($required_version)." - exit 1 fi + return 1 else echo "Error: Unknown requirement type '$requirement_type' for $module_name." 
exit 1 fi - - echo "" } + check_binutils_version() { min_required_version='2.25' @@ -428,9 +453,6 @@ centos_main() { echo "" echo -e "\e[33mYum packages:\e[0m" print_dependencies "${centos_yum_package_requirements[@]}" - echo "" - echo -e "\e[33mCPAN modules:\e[0m" - print_dependencies "${cpan_modules_requirements[@]}" print_steps_to_install_oic_on_centos exit 0 fi @@ -609,9 +631,6 @@ ubuntu_main() { echo "" echo -e "\e[33mApt packages:\e[0m" print_dependencies "${ubuntu_apt_package_requirements[@]}" - echo "" - echo -e "\e[33mCPAN modules:\e[0m" - print_dependencies "${cpan_modules_requirements[@]}" print_steps_to_install_oic_on_ubuntu exit 0 fi diff --git a/installer_scripts/install-yb-voyager b/installer_scripts/install-yb-voyager index 86185127c7..44a49c462f 100755 --- a/installer_scripts/install-yb-voyager +++ b/installer_scripts/install-yb-voyager @@ -12,6 +12,7 @@ LOG_FILE=/tmp/install-yb-voyager.log VERSION="latest" ONLY_PG="false" +SKIP_DEBEZIUM="false" trap on_exit EXIT @@ -131,17 +132,17 @@ centos_main() { fi # TODO: Remove the usage of jq and use something inbuilt in the future. - if [ "${VERSION}" == "latest" ] + if [ "${VERSION}" != "local" ] then $YUM_INSTALL jq 1>&2 - fetch_latest_release_data + fetch_release_data fi centos_check_base_repo_enabled output "Installing RPM dependencies." $YUM_INSTALL which wget git gcc make 1>&2 $YUM_INSTALL https://download.postgresql.org/pub/repos/yum/reporpms/EL-${majorVersion}-x86_64/pgdg-redhat-repo-latest.noarch.rpm 1>&2 || true - $YUM_INSTALL postgresql16 1>&2 + $YUM_INSTALL postgresql17 1>&2 $YUM_INSTALL sqlite 1>&2 create_guardrail_scripts_dir create_pg_dump_args_file @@ -177,10 +178,10 @@ ubuntu_main() { sudo apt-get update 1>&2 # TODO: Remove the usage of jq and use something inbuilt in the future. - if [ "${VERSION}" == "latest" ] + if [ "${VERSION}" != "local" ] then sudo apt-get install -y jq 1>&2 - fetch_latest_release_data + fetch_release_data fi output "Installing packages." 
@@ -225,10 +226,10 @@ macos_main() { macos_install_brew # TODO: Remove the usage of jq and use something inbuilt in the future. - if [ "${VERSION}" == "latest" ] + if [ "${VERSION}" != "local" ] then brew install jq 1>&2 - fetch_latest_release_data + fetch_release_data fi macos_install_pg_dump @@ -248,41 +249,56 @@ macos_main() { # COMMON #============================================================================= -# Function to fetch the latest release data from GitHub API -fetch_latest_release_data() { - # Fetch the latest release data from GitHub API - LATEST_RELEASE_DATA=$(curl -s https://api.github.com/repos/yugabyte/yb-voyager/releases/latest) - if [ -z "$LATEST_RELEASE_DATA" ]; then - echo "ERROR: Failed to fetch the latest release data from the GitHub API." - exit 1 - fi +# Function to fetch the release data from GitHub API +fetch_release_data() { + # Fetch the latest release data from GitHub API if VERSION is latest + if [ "${VERSION}" == "latest" ]; then + RELEASE_DATA=$(curl -s https://api.github.com/repos/yugabyte/yb-voyager/releases/latest) + if [ -z "$RELEASE_DATA" ]; then + echo "ERROR: Failed to fetch the latest release data from the GitHub API." 
+ exit 1 + fi + fi - # Extract the latest release name and tag name - LATEST_RELEASE_NAME=$(echo "$LATEST_RELEASE_DATA" | jq -r '.name') - LATEST_TAG_NAME=$(echo "$LATEST_RELEASE_DATA" | jq -r '.tag_name') + # Extract the latest release name and tag name from the fetched data if VERSION is latest + if [ "${VERSION}" == "latest" ]; then + RELEASE_NAME=$(echo "$RELEASE_DATA" | jq -r '.name') + TAG_NAME=$(echo "$RELEASE_DATA" | jq -r '.tag_name') + else + # If the version is not latest, then the provided VERSION is used in the release name and tag name + RELEASE_NAME="v${VERSION}" + TAG_NAME="yb-voyager/v${VERSION}" + fi - # Fetch the commit hash of the latest tagged commit - LATEST_TAG_DATA=$(curl -s https://api.github.com/repos/yugabyte/yb-voyager/git/refs/tags/${LATEST_TAG_NAME}) - if [ -z "$LATEST_TAG_DATA" ]; then - echo "ERROR: Failed to fetch the latest tagged commit data from the GitHub API." + # Fetch the commit hash of the tagged commit related to the release + TAG_DATA=$(curl -s https://api.github.com/repos/yugabyte/yb-voyager/git/refs/tags/${TAG_NAME}) + if [ -z "$TAG_DATA" ]; then + echo "ERROR: Failed to fetch the tagged commit data from the GitHub API." 
exit 1 fi - LATEST_TAGGED_COMMIT=$(echo "$LATEST_TAG_DATA" | jq -r '.object.sha') + TAGGED_COMMIT=$(echo "$TAG_DATA" | jq -r '.object.sha') # Extract voyager version from the release name - VOYAGER_VERSION=$(echo "$LATEST_RELEASE_NAME" | sed 's/v//') + VOYAGER_RELEASE_VERSION=$(echo "$RELEASE_NAME" | sed 's/v//') # Log the fetched data to the log file - echo "LATEST_RELEASE_NAME=${LATEST_RELEASE_NAME}" >> "$LOG_FILE" - echo "LATEST_TAG_NAME=${LATEST_TAG_NAME}" >> "$LOG_FILE" - echo "LATEST_TAGGED_COMMIT=${LATEST_TAGGED_COMMIT}" >> "$LOG_FILE" - echo "VOYAGER_VERSION=${VOYAGER_VERSION}" >> "$LOG_FILE" + echo "RELEASE_NAME=${RELEASE_NAME}" >> "$LOG_FILE" + echo "TAG_NAME=${TAG_NAME}" >> "$LOG_FILE" + echo "TAGGED_COMMIT=${TAGGED_COMMIT}" >> "$LOG_FILE" + echo "VOYAGER_RELEASE_VERSION=${VOYAGER_RELEASE_VERSION}" >> "$LOG_FILE" # Set global variables for version and hash - VOYAGER_RELEASE_NAME=${VOYAGER_RELEASE_NAME:-${LATEST_RELEASE_NAME}} - DEBEZIUM_VERSION=${DEBEZIUM_VERSION:-"2.5.2-${VOYAGER_VERSION}"} - YB_VOYAGER_GIT_HASH=${LATEST_TAGGED_COMMIT} + VOYAGER_RELEASE_NAME=${RELEASE_NAME} + DEBEZIUM_VERSION="${DEBEZIUM_LOCAL_VERSION}-${VOYAGER_RELEASE_VERSION}" + # If voyager version contains 0rcx then DEBEZIUM_VERSION will be like 0rcx.2.5.2-[voyager version without rcx] + if [[ $VOYAGER_RELEASE_VERSION == *"rc"* ]]; then + # In case rc release voyager version is like 0rc1.1.8.5 + RC_PREFIX="${VOYAGER_RELEASE_VERSION:0:4}" + VOYAGER_VERSION="${VOYAGER_RELEASE_VERSION:5}" + DEBEZIUM_VERSION="${RC_PREFIX}.${DEBEZIUM_LOCAL_VERSION}-${VOYAGER_VERSION}" + fi + YB_VOYAGER_GIT_HASH=${TAGGED_COMMIT} } check_java() { @@ -308,14 +324,18 @@ check_java() { } install_debezium_server(){ - if [ "${VERSION}" == "latest" ] + if [ $SKIP_DEBEZIUM == "true" ] ; then + return + fi + + if [ "${VERSION}" != "local" ] then - output "Installing debezium:${VERSION}:${DEBEZIUM_VERSION}" - install_debezium_server_latest_release + output "Installing debezium:${DEBEZIUM_VERSION}" + 
install_debezium_server_from_release return fi - output "Installing debezium:${VERSION}." + output "Installing debezium:${VERSION}." check_install_maven clean_debezium install_debezium_local ${DEBEZIUM_LOCAL_REF} @@ -330,7 +350,7 @@ install_debezium_server(){ package_debezium_server_local } -install_debezium_server_latest_release() { +install_debezium_server_from_release() { debezium_server_filename="debezium-server.tar.gz" # download wget -nv "https://github.com/yugabyte/yb-voyager/releases/download/yb-voyager/${VOYAGER_RELEASE_NAME}/${debezium_server_filename}" @@ -523,9 +543,9 @@ rebuild_voyager_local() { get_passed_options() { if [ "$1" == "linux" ] then - OPTS=$(getopt -o "lpv", --long install-from-local-source,only-pg-support,rebuild-voyager-local --name 'install-yb-voyager' -- $ARGS_LINUX) + OPTS=$(getopt -o "lpdvV", --long install-from-local-source,only-pg-support,skip-debezium,rebuild-voyager-local,version: --name 'install-yb-voyager' -- $ARGS_LINUX) else - OPTS=$(getopt lpv $ARGS_MACOS) + OPTS=$(getopt lpdvV $ARGS_MACOS) fi eval set -- "$OPTS" @@ -540,10 +560,18 @@ get_passed_options() { ONLY_PG="true"; shift ;; + -d | --skip-debezium ) + SKIP_DEBEZIUM="true" + shift + ;; -v | --rebuild-voyager-local ) REBUILD_VOYAGER_LOCAL="true"; shift ;; + -V | --version ) + VERSION="$2" + shift 2 + ;; * ) break ;; @@ -629,9 +657,9 @@ update_yb_voyager_bashrc() { install_yb_voyager() { GO=${GO:-"go"} - if [ "${VERSION}" == "latest" ] + if [ "${VERSION}" != "local" ] then - output "Installing yb-voyager:${VERSION}:${VOYAGER_VERSION}" + output "Installing yb-voyager:${VOYAGER_RELEASE_VERSION}" $GO install github.com/yugabyte/yb-voyager/yb-voyager@${YB_VOYAGER_GIT_HASH} sudo mv -f $HOME/go/bin/yb-voyager /usr/local/bin return @@ -801,7 +829,7 @@ create_base_ora2pg_conf_file() { fi output "Installing the latest base-ora2pg.conf" - if [ "${VERSION}" == "latest" ] + if [ "${VERSION}" != "local" ] then sudo wget -nv -O $conf_file_name 
https://github.com/yugabyte/yb-voyager/raw/$YB_VOYAGER_GIT_HASH/yb-voyager/src/srcdb/data/sample-ora2pg.conf else @@ -823,7 +851,7 @@ create_pg_dump_args_file() { fi output "Installing the latest pg_dump-args.ini" - if [ "${VERSION}" == "latest" ] + if [ "${VERSION}" != "local" ] then sudo wget -nv -O $args_file_name https://github.com/yugabyte/yb-voyager/raw/$YB_VOYAGER_GIT_HASH/yb-voyager/src/srcdb/data/pg_dump-args.ini else @@ -840,7 +868,7 @@ create_gather_assessment_metadata_dir() { sudo mkdir -p $scripts_parent_dir output "Installing the latest scripts for gathering assessment metadata" - if [ "${VERSION}" == "latest" ] + if [ "${VERSION}" != "local" ] then TAR_URL="https://github.com/yugabyte/yb-voyager/raw/$YB_VOYAGER_GIT_HASH/$scripts_dir_path/${scripts_dir_name}.tar.gz" sudo wget -nv -O /tmp/${scripts_dir_name}.tar.gz $TAR_URL @@ -860,7 +888,7 @@ create_guardrail_scripts_dir() { sudo mkdir -p $scripts_parent_dir/$scripts_dir_name output "Installing the guardrails scripts" - if [ "${VERSION}" == "latest" ] + if [ "${VERSION}" != "local" ] then TAR_URL="https://github.com/yugabyte/yb-voyager/archive/$YB_VOYAGER_GIT_HASH.tar.gz" sudo wget -nv -O /tmp/yb-voyager.tar.gz $TAR_URL @@ -925,7 +953,7 @@ ubuntu_install_postgres() { sudo apt install -y postgresql-common 1>&2 echo | sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh 1>&2 sudo apt-get update 1>&2 - sudo apt-get -y install postgresql-16 1>&2 + sudo apt-get -y install postgresql-17 1>&2 output "Postgres Installed." 
} diff --git a/migtests/lib/yb.py b/migtests/lib/yb.py index 854de33fe4..c9351928d8 100644 --- a/migtests/lib/yb.py +++ b/migtests/lib/yb.py @@ -52,7 +52,8 @@ def new_source_db(): def verify_colocation(tgt, source_db_type): print("Verifying the colocation of the tables") - json_file = "export-dir/assessment/reports/migration_assessment_report.json" + export_dir = os.getenv("EXPORT_DIR", "export-dir") + json_file = f"{export_dir}/assessment/reports/migration_assessment_report.json" sharded_tables, colocated_tables = fetch_sharded_and_colocated_tables(json_file) diff --git a/migtests/scripts/add-pk-from-alter-to-create b/migtests/scripts/add-pk-from-alter-to-create index 83d0ef3eb2..77f1ab5b7b 100755 --- a/migtests/scripts/add-pk-from-alter-to-create +++ b/migtests/scripts/add-pk-from-alter-to-create @@ -5,7 +5,7 @@ import os import re import shutil -table_file_path = os.getenv('TEST_DIR')+'/export-dir/schema/tables/table.sql' +table_file_path = os.getenv('EXPORT_DIR')+'/schema/tables/table.sql' #copy the table_file_path to table_file_path+'.old' src = table_file_path diff --git a/migtests/scripts/functions.sh b/migtests/scripts/functions.sh index 2b48f9f5b5..f01d384a06 100644 --- a/migtests/scripts/functions.sh +++ b/migtests/scripts/functions.sh @@ -134,7 +134,8 @@ grant_user_permission_oracle(){ */ GRANT FLASHBACK ANY TABLE TO ybvoyager; EOF - run_sqlplus_as_sys ${db_name} "oracle-inputs.sql" + run_sqlplus_as_sys ${db_name} "oracle-inputs.sql" + rm oracle-inputs.sql } @@ -149,13 +150,15 @@ EOF run_sqlplus_as_sys ${pdb_name} "create-pdb-tablespace.sql" cp ${SCRIPTS}/oracle/live-grants.sql oracle-inputs.sql run_sqlplus_as_sys ${cdb_name} "oracle-inputs.sql" + rm create-pdb-tablespace.sql + rm oracle-inputs.sql } grant_permissions_for_live_migration_pg() { db_name=$1 db_schema=$2 conn_string="postgresql://${SOURCE_DB_ADMIN_USER}:${SOURCE_DB_ADMIN_PASSWORD}@${SOURCE_DB_HOST}:${SOURCE_DB_PORT}/${db_name}" - psql "${conn_string}" -v voyager_user="${SOURCE_DB_USER}" -v 
schema_list="${db_schema}" -v replication_group='replication_group' -v original_owner_of_tables="${SOURCE_DB_ADMIN_USER}" -v is_live_migration=1 -v is_live_migration_fall_back=0 -f /opt/yb-voyager/guardrails-scripts/yb-voyager-pg-grant-migration-permissions.sql + psql "${conn_string}" -v voyager_user="${SOURCE_DB_USER}" -v schema_list="${db_schema}" -v replication_group='replication_group' -v is_live_migration=1 -v is_live_migration_fall_back=0 -f /opt/yb-voyager/guardrails-scripts/yb-voyager-pg-grant-migration-permissions.sql } grant_permissions() { @@ -191,7 +194,7 @@ run_sqlplus_as_sys() { run_sqlplus_as_schema_owner() { db_name=$1 sql=$2 - conn_string="${SOURCE_DB_USER_SCHEMA_OWNER}/${SOURCE_DB_USER_SCHEMA_OWNER_PASSWORD}@${SOURCE_DB_HOST}:${SOURCE_DB_PORT}/${db_name}" + conn_string="${SOURCE_DB_SCHEMA}/${SOURCE_DB_PASSWORD}@${SOURCE_DB_HOST}:${SOURCE_DB_PORT}/${db_name}" echo exit | sqlplus -f "${conn_string}" @"${sql}" } @@ -236,7 +239,6 @@ export_schema() { --source-db-password ${SOURCE_DB_PASSWORD} --source-db-name ${SOURCE_DB_NAME} --send-diagnostics=false --yes - --start-clean t " if [ "${source_db_schema}" != "" ] then @@ -280,7 +282,6 @@ export_data() { --disable-pb=true --send-diagnostics=false --yes - --start-clean 1 " if [ "${TABLE_LIST}" != "" ] then @@ -371,7 +372,6 @@ import_schema() { --target-db-name ${TARGET_DB_NAME} --yes --send-diagnostics=false - --start-clean 1 " if [ "${SOURCE_DB_TYPE}" != "postgresql" ] @@ -392,7 +392,6 @@ import_data() { --target-db-name ${TARGET_DB_NAME} --disable-pb true --send-diagnostics=false - --start-clean 1 --truncate-splits true --max-retries 1 " @@ -436,7 +435,6 @@ import_data_to_source_replica() { --source-replica-db-user ${SOURCE_REPLICA_DB_USER} --source-replica-db-name ${SOURCE_REPLICA_DB_NAME} --source-replica-db-password ${SOURCE_REPLICA_DB_PASSWORD} - --start-clean true --disable-pb true --send-diagnostics=false --parallel-jobs 3 @@ -532,37 +530,38 @@ get_data_migration_report(){ } verify_report() { - 
expected_report=$1 - actual_report=$2 - if [ -f "${actual_report}" ] - then - echo "Printing ${actual_report} file" - cat "${actual_report}" - # Parse JSON data - actual_data=$(jq -c '.' "${actual_report}") + expected_report=$1 + actual_report=$2 + + if [ -f "${actual_report}" ]; then + # Parse and sort JSON data + actual_data=$(jq -c '.' "${actual_report}" | jq -S 'sort_by(.table_name)') - if [ -f "${expected_report}" ] - then - expected_data=$(jq -c '.' "${expected_report}") + if [ -f "${expected_report}" ]; then + expected_data=$(jq -c '.' "${expected_report}" | jq -S 'sort_by(.table_name)') + + # Save the sorted JSON data to temporary files + temp_actual=$(mktemp) + temp_expected=$(mktemp) + echo "$actual_data" > "$temp_actual" + echo "$expected_data" > "$temp_expected" + + compare_files "$temp_actual" "$temp_expected" - # Compare data - actual_data=$(echo $actual_data | jq -S 'sort_by(.table_name)') - expected_data=$(echo $expected_data | jq -S 'sort_by(.table_name)') - if [ "$actual_data" == "$expected_data" ] - then - echo "Data matches expected report." - else - echo "Data does not match expected report." - exit 1 + # Clean up temporary files + rm "$temp_actual" "$temp_expected" + + # If files do not match, exit + if [ $? -ne 0 ]; then + exit 1 fi else echo "No ${expected_report} found." - # exit 1 fi - else - echo "No ${actual_report} found." - exit 1 - fi + else + echo "No ${actual_report} found." 
+ exit 1 + fi } @@ -627,16 +626,6 @@ get_value_from_msr(){ echo $val } -create_ff_schema(){ - db_name=$1 - - cat > create-ff-schema.sql << EOF - CREATE USER FF_SCHEMA IDENTIFIED BY "password"; - GRANT all privileges to FF_SCHEMA; -EOF - run_sqlplus_as_sys ${db_name} "create-ff-schema.sql" -} - set_replica_identity(){ db_schema=$1 cat > alter_replica_identity.sql < alter_user_superuser.sql < $TEMP_SCRIPT + + run_sqlplus_as_sys ${SOURCE_DB_NAME} $TEMP_SCRIPT + + # Clean up the temporary file after execution + rm -f $TEMP_SCRIPT + elif [ "${SOURCE_DB_TYPE}" = "postgresql" ]; then + conn_string="postgresql://${SOURCE_DB_ADMIN_USER}:${SOURCE_DB_ADMIN_PASSWORD}@${SOURCE_DB_HOST}:${SOURCE_DB_PORT}/${SOURCE_DB_NAME}" + psql "${conn_string}" -v voyager_user="${SOURCE_DB_USER}" -v schema_list="${SOURCE_DB_SCHEMA}" -v replication_group='replication_group' -v is_live_migration=1 -v is_live_migration_fall_back=1 -f /opt/yb-voyager/guardrails-scripts/yb-voyager-pg-grant-migration-permissions.sql + + disable_triggers_sql=$(mktemp) + drop_constraints_sql=$(mktemp) + formatted_schema_list=$(echo "${SOURCE_DB_SCHEMA}" | sed "s/,/','/g") + + # Disabling Triggers + cat < "${disable_triggers_sql}" +DO \$\$ +DECLARE + r RECORD; +BEGIN + FOR r IN + SELECT table_schema, '"' || table_name || '"' AS t_name + FROM information_schema.tables + WHERE table_type = 'BASE TABLE' + AND table_schema IN ('${formatted_schema_list}') + LOOP + EXECUTE 'ALTER TABLE ' || r.table_schema || '.' 
|| r.t_name || ' DISABLE TRIGGER ALL'; + END LOOP; +END \$\$; EOF - run_psql ${SOURCE_DB_NAME} "$(cat alter_user_superuser.sql)" - + + # Dropping Fkeys + cat < "${drop_constraints_sql}" +DO \$\$ +DECLARE + fk RECORD; +BEGIN + FOR fk IN + SELECT conname, conrelid::regclass AS table_name + FROM pg_constraint + JOIN pg_class ON conrelid = pg_class.oid + JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace + WHERE contype = 'f' + AND pg_namespace.nspname IN ('${formatted_schema_list}') + LOOP + EXECUTE 'ALTER TABLE ' || fk.table_name || ' DROP CONSTRAINT ' || fk.conname; + END LOOP; +END \$\$; +EOF + + psql_import_file "${SOURCE_DB_NAME}" "${disable_triggers_sql}" + psql_import_file "${SOURCE_DB_NAME}" "${drop_constraints_sql}" + + rm -f "${disable_triggers_sql}" "${drop_constraints_sql}" fi +} +reenable_triggers_fkeys() { + if [ "${SOURCE_DB_TYPE}" = "postgresql" ]; then + enable_triggers_sql=$(mktemp) + formatted_schema_list=$(echo "${SOURCE_DB_SCHEMA}" | sed "s/,/','/g") + + cat < "${enable_triggers_sql}" +DO \$\$ +DECLARE + r RECORD; +BEGIN + FOR r IN + SELECT table_schema, '"' || table_name || '"' AS t_name + FROM information_schema.tables + WHERE table_type = 'BASE TABLE' + AND table_schema IN ('${formatted_schema_list}') + LOOP + EXECUTE 'ALTER TABLE ' || r.table_schema || '.' || r.t_name || ' ENABLE TRIGGER ALL'; + END LOOP; +END \$\$; +EOF + psql_import_file "${SOURCE_DB_NAME}" "${enable_triggers_sql}" + fi +#TODO: Add re-creating FKs } assess_migration() { @@ -695,7 +759,6 @@ assess_migration() { --source-db-password ${SOURCE_DB_PASSWORD} --source-db-name ${SOURCE_DB_NAME} --send-diagnostics=false --yes - --start-clean t --iops-capture-interval 0 " if [ "${SOURCE_DB_SCHEMA}" != "" ] @@ -816,17 +879,32 @@ normalize_json() { # Normalize JSON with jq; use --sort-keys to avoid the need to keep the same sequence of keys in expected vs actual json jq --sort-keys 'walk( if type == "object" then - .ObjectNames? 
|= (if type == "string" then split(", ") | sort | join(", ") else . end) | + .ObjectNames? |= ( + if type == "string" then + split(", ") | sort | join(", ") + else + . + end + ) | .VoyagerVersion? = "IGNORED" | + .TargetDBVersion? = "IGNORED" | .DbVersion? = "IGNORED" | .FilePath? = "IGNORED" | .OptimalSelectConnectionsPerNode? = "IGNORED" | .OptimalInsertConnectionsPerNode? = "IGNORED" | .RowCount? = "IGNORED" | - .SqlStatement? |= (if type == "string" then gsub("\\n"; " ") else . end) + .MigrationComplexityExplanation?= "IGNORED" | + # Replace newline characters in SqlStatement with spaces + .SqlStatement? |= ( + if type == "string" then + gsub("\\n"; " ") + else + . + end + ) elif type == "array" then - sort_by(tostring) - else + sort_by(tostring) + else . end )' "$input_file" > "$temp_file" @@ -870,7 +948,7 @@ compare_files() { return 0 else echo "Data does not match expected report." - diff_output=$(diff "$file1" "$file2") + diff_output=$(diff --context "$file1" "$file2") echo "$diff_output" return 1 fi @@ -982,3 +1060,60 @@ cutover_to_target() { yb-voyager initiate cutover to target ${args} $* } + +create_source_db() { + source_db=$1 + case ${SOURCE_DB_TYPE} in + postgresql) + run_psql postgres "DROP DATABASE IF EXISTS ${source_db};" + run_psql postgres "CREATE DATABASE ${source_db};" + ;; + mysql) + run_mysql mysql "DROP DATABASE IF EXISTS ${source_db};" + run_mysql mysql "CREATE DATABASE ${source_db};" + ;; + oracle) + cat > create-oracle-schema.sql << EOF + CREATE USER ${source_db} IDENTIFIED BY "password"; + GRANT all privileges to ${source_db}; +EOF + run_sqlplus_as_sys ${SOURCE_DB_NAME} "create-oracle-schema.sql" + rm create-oracle-schema.sql + ;; + *) + echo "ERROR: Source DB not created for ${SOURCE_DB_TYPE}" + exit 1 + ;; + esac +} + +normalize_and_export_vars() { + local test_suffix=$1 + + # Normalize TEST_NAME + # Keeping the full name for PG and MySQL to test out large schema/export dir names + export NORMALIZED_TEST_NAME="$(echo "$TEST_NAME" | tr 
'/-' '_')" + + # Set EXPORT_DIR + export EXPORT_DIR=${EXPORT_DIR:-"${TEST_DIR}/${NORMALIZED_TEST_NAME}_${test_suffix}_export-dir"} + if [ -n "${SOURCE_DB_SSL_MODE}" ]; then + EXPORT_DIR="${EXPORT_DIR}_ssl" + fi + + # Set database-specific variables + case "${SOURCE_DB_TYPE}" in + postgresql|mysql) + export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"${NORMALIZED_TEST_NAME}_${test_suffix}"} + ;; + oracle) + # Limit schema name to 10 characters for Oracle/Debezium due to 30 character limit + # Since test_suffix is the unique identifying factor, we need to add it post all the normalization + export SOURCE_DB_SCHEMA=${SOURCE_DB_SCHEMA:-"${NORMALIZED_TEST_NAME:0:10}_${test_suffix}"} + export SOURCE_DB_SCHEMA=${SOURCE_DB_SCHEMA^^} + ;; + *) + echo "ERROR: Unsupported SOURCE_DB_TYPE: ${SOURCE_DB_TYPE}" + exit 1 + ;; + esac +} diff --git a/migtests/scripts/live-migration-fallb-run-test.sh b/migtests/scripts/live-migration-fallb-run-test.sh index 1ddf5221cd..bac1a1e968 100755 --- a/migtests/scripts/live-migration-fallb-run-test.sh +++ b/migtests/scripts/live-migration-fallb-run-test.sh @@ -18,9 +18,7 @@ export REPO_ROOT="${PWD}" export SCRIPTS="${REPO_ROOT}/migtests/scripts" export TESTS_DIR="${REPO_ROOT}/migtests/tests" export TEST_DIR="${TESTS_DIR}/${TEST_NAME}" -export EXPORT_DIR=${EXPORT_DIR:-"${TEST_DIR}/export-dir"} export QUEUE_SEGMENT_MAX_BYTES=400 - export PYTHONPATH="${REPO_ROOT}/migtests/lib" # Order of env.sh import matters. @@ -37,10 +35,12 @@ else source ${SCRIPTS}/${SOURCE_DB_TYPE}/env.sh fi -source ${SCRIPTS}/yugabytedb/env.sh - source ${SCRIPTS}/functions.sh +normalize_and_export_vars "fallb" + +source ${SCRIPTS}/yugabytedb/env.sh + main() { echo "Deleting the parent export-dir present in the test directory" @@ -56,6 +56,10 @@ main() { pushd ${TEST_DIR} step "Initialise source database." 
+ if [ "${SOURCE_DB_TYPE}" = "oracle" ] + then + create_source_db ${SOURCE_DB_SCHEMA} + fi ./init-db step "Grant source database user permissions for live migration" @@ -169,7 +173,11 @@ main() { import_schema --post-snapshot-import true --refresh-mviews=true step "Run snapshot validations." - "${TEST_DIR}/validate" --live_migration 'true' --ff_enabled 'false' --fb_enabled 'true' + "${TEST_DIR}/validate" --live_migration 'true' --ff_enabled 'false' --fb_enabled 'true' || { + tail_log_file "yb-voyager-import-data.log" + tail_log_file "yb-voyager-export-data-from-source.log" + exit 1 + } step "Inserting new events" run_sql_file source_delta.sql @@ -236,7 +244,8 @@ main() { run_ysql ${TARGET_DB_NAME} "\di" run_ysql ${TARGET_DB_NAME} "\dft" - + step "Re-Enable Triggers and Fkeys" + reenable_triggers_fkeys step "Run final validations." if [ -x "${TEST_DIR}/validateAfterChanges" ] @@ -263,7 +272,7 @@ main() { step "Clean up" ./cleanup-db - rm -rf "${EXPORT_DIR}/*" + rm -rf "${EXPORT_DIR}" run_ysql yugabyte "DROP DATABASE IF EXISTS ${TARGET_DB_NAME};" } diff --git a/migtests/scripts/live-migration-fallf-run-test.sh b/migtests/scripts/live-migration-fallf-run-test.sh index 175630f70a..54dbe7bd78 100755 --- a/migtests/scripts/live-migration-fallf-run-test.sh +++ b/migtests/scripts/live-migration-fallf-run-test.sh @@ -18,7 +18,6 @@ export REPO_ROOT="${PWD}" export SCRIPTS="${REPO_ROOT}/migtests/scripts" export TESTS_DIR="${REPO_ROOT}/migtests/tests" export TEST_DIR="${TESTS_DIR}/${TEST_NAME}" -export EXPORT_DIR=${EXPORT_DIR:-"${TEST_DIR}/export-dir"} export PYTHONPATH="${REPO_ROOT}/migtests/lib" export PATH="${PATH}:/usr/lib/oracle/21/client64/bin" @@ -38,12 +37,14 @@ else source ${SCRIPTS}/${SOURCE_DB_TYPE}/env.sh fi +source ${SCRIPTS}/functions.sh + +normalize_and_export_vars "fallf" + source ${SCRIPTS}/${SOURCE_DB_TYPE}/ff_env.sh source ${SCRIPTS}/yugabytedb/env.sh -source ${SCRIPTS}/functions.sh - main() { echo "Deleting the parent export-dir present in the test 
directory" @@ -62,7 +63,9 @@ main() { if [ "${SOURCE_DB_TYPE}" = "oracle" ] then - create_ff_schema ${SOURCE_REPLICA_DB_NAME} + create_source_db ${SOURCE_DB_SCHEMA} + # TODO: Add dynamic Fall Forward schema creation. Currently using the same name for all tests. + create_source_db ${SOURCE_REPLICA_DB_SCHEMA} run_sqlplus_as_sys ${SOURCE_REPLICA_DB_NAME} ${SCRIPTS}/oracle/create_metadata_tables.sql fi ./init-db @@ -206,7 +209,12 @@ main() { import_schema --post-snapshot-import true --refresh-mviews true step "Run snapshot validations." - "${TEST_DIR}/validate" --live_migration 'true' --ff_enabled 'true' --fb_enabled 'false' + "${TEST_DIR}/validate" --live_migration 'true' --ff_enabled 'true' --fb_enabled 'false' || { + tail_log_file "yb-voyager-import-data.log" + tail_log_file "yb-voyager-export-data-from-source.log" + tail_log_file "yb-voyager-import-data-to-source-replica.log" + exit 1 + } step "Inserting new events to source" run_sql_file source_delta.sql @@ -286,7 +294,7 @@ main() { step "Clean up" ./cleanup-db - rm -rf "${EXPORT_DIR}/*" + rm -rf "${EXPORT_DIR}" run_ysql yugabyte "DROP DATABASE IF EXISTS ${TARGET_DB_NAME};" } diff --git a/migtests/scripts/live-migration-run-test.sh b/migtests/scripts/live-migration-run-test.sh index 1594f1449c..98f99e6beb 100755 --- a/migtests/scripts/live-migration-run-test.sh +++ b/migtests/scripts/live-migration-run-test.sh @@ -18,7 +18,6 @@ export REPO_ROOT="${PWD}" export SCRIPTS="${REPO_ROOT}/migtests/scripts" export TESTS_DIR="${REPO_ROOT}/migtests/tests" export TEST_DIR="${TESTS_DIR}/${TEST_NAME}" -export EXPORT_DIR=${EXPORT_DIR:-"${TEST_DIR}/export-dir"} export QUEUE_SEGMENT_MAX_BYTES=400 export PYTHONPATH="${REPO_ROOT}/migtests/lib" @@ -37,10 +36,12 @@ else source ${SCRIPTS}/${SOURCE_DB_TYPE}/env.sh fi -source ${SCRIPTS}/yugabytedb/env.sh - source ${SCRIPTS}/functions.sh +normalize_and_export_vars "live" + +source ${SCRIPTS}/yugabytedb/env.sh + main() { echo "Deleting the parent export-dir present in the test directory" 
@@ -56,6 +57,10 @@ main() { pushd ${TEST_DIR} step "Initialise source database." + if [ "${SOURCE_DB_TYPE}" = "oracle" ] + then + create_source_db ${SOURCE_DB_SCHEMA} + fi ./init-db step "Grant source database user permissions for live migration" @@ -218,7 +223,7 @@ main() { step "Clean up" ./cleanup-db - rm -rf "${EXPORT_DIR}/*" + rm -rf "${EXPORT_DIR}" run_ysql yugabyte "DROP DATABASE IF EXISTS ${TARGET_DB_NAME};" } diff --git a/migtests/scripts/postgresql/env.sh b/migtests/scripts/postgresql/env.sh index ec1f123582..fcc466b822 100644 --- a/migtests/scripts/postgresql/env.sh +++ b/migtests/scripts/postgresql/env.sh @@ -3,4 +3,4 @@ export SOURCE_DB_PORT=${SOURCE_DB_PORT:-5432} export SOURCE_DB_USER=${SOURCE_DB_USER:-"ybvoyager"} export SOURCE_DB_PASSWORD=${SOURCE_DB_PASSWORD:-'Test@123#$%^&*()!'} export SOURCE_DB_ADMIN_USER=${SOURCE_DB_ADMIN_USER:-"postgres"} -export SOURCE_DB_ADMIN_PASSWORD=${SOURCE_DB_ADMIN_PASSWORD:-"secret"} +export SOURCE_DB_ADMIN_PASSWORD=${SOURCE_DB_ADMIN_PASSWORD:-"postgres"} diff --git a/migtests/scripts/postgresql/ff_env.sh b/migtests/scripts/postgresql/ff_env.sh index d8b513f842..0f980435cf 100644 --- a/migtests/scripts/postgresql/ff_env.sh +++ b/migtests/scripts/postgresql/ff_env.sh @@ -2,4 +2,4 @@ export SOURCE_REPLICA_DB_NAME=${SOURCE_REPLICA_DB_NAME:-"ff_db"} export SOURCE_REPLICA_DB_HOST=${SOURCE_REPLICA_DB_HOST:-"127.0.0.1"} export SOURCE_REPLICA_DB_PORT=${SOURCE_REPLICA_DB_PORT:-"5432"} export SOURCE_REPLICA_DB_USER=${SOURCE_REPLICA_DB_USER:-"postgres"} -export SOURCE_REPLICA_DB_PASSWORD=${SOURCE_REPLICA_DB_PASSWORD:-"secret"} \ No newline at end of file +export SOURCE_REPLICA_DB_PASSWORD=${SOURCE_REPLICA_DB_PASSWORD:-"postgres"} \ No newline at end of file diff --git a/migtests/scripts/run-schema-migration.sh b/migtests/scripts/run-schema-migration.sh index 9dfce9311f..8a142cf152 100755 --- a/migtests/scripts/run-schema-migration.sh +++ b/migtests/scripts/run-schema-migration.sh @@ -118,7 +118,8 @@ main() { mv 
"${EXPORT_DIR}/schema/failed.sql" "${EXPORT_DIR}/schema/failed.sql.bak" #replace_files replace_files "${TEST_DIR}/replacement_dir" "${EXPORT_DIR}/schema" - import_schema + # --start-clean is required here since we are running the import command for the second time + import_schema --start-clean t if [ -f "${EXPORT_DIR}/schema/failed.sql" ] then @@ -138,7 +139,7 @@ main() { step "Clean up" ./cleanup-db - rm -rf "${EXPORT_DIR}/*" + rm -rf "${EXPORT_DIR}" run_ysql yugabyte "DROP DATABASE IF EXISTS ${TARGET_DB_NAME};" } diff --git a/migtests/scripts/run-test-export-data.sh b/migtests/scripts/run-test-export-data.sh index 04e84bdd22..dcbeecd362 100755 --- a/migtests/scripts/run-test-export-data.sh +++ b/migtests/scripts/run-test-export-data.sh @@ -74,7 +74,7 @@ main() { step "Clean up" ./cleanup-db - rm -rf "${EXPORT_DIR}/*" + rm -rf "${EXPORT_DIR}" run_ysql yugabyte "DROP DATABASE IF EXISTS ${TARGET_DB_NAME};" } diff --git a/migtests/scripts/run-test.sh b/migtests/scripts/run-test.sh index 88569b0c60..e40cc60d10 100755 --- a/migtests/scripts/run-test.sh +++ b/migtests/scripts/run-test.sh @@ -17,7 +17,6 @@ export REPO_ROOT="${PWD}" export SCRIPTS="${REPO_ROOT}/migtests/scripts" export TESTS_DIR="${REPO_ROOT}/migtests/tests" export TEST_DIR="${TESTS_DIR}/${TEST_NAME}" -export EXPORT_DIR=${EXPORT_DIR:-"${TEST_DIR}/export-dir"} export PYTHONPATH="${REPO_ROOT}/migtests/lib" @@ -33,11 +32,16 @@ then else source ${TEST_DIR}/env.sh fi + source ${SCRIPTS}/${SOURCE_DB_TYPE}/env.sh -source ${SCRIPTS}/yugabytedb/env.sh + source ${SCRIPTS}/functions.sh +normalize_and_export_vars "offline" + +source ${SCRIPTS}/yugabytedb/env.sh + main() { echo "Deleting the parent export-dir present in the test directory" rm -rf ${EXPORT_DIR} @@ -52,6 +56,18 @@ main() { pushd ${TEST_DIR} step "Initialise source database." 
+ if [[ "${SKIP_DB_CREATION}" != "true" ]]; then + if [[ "${SOURCE_DB_TYPE}" == "postgresql" || "${SOURCE_DB_TYPE}" == "mysql" ]]; then + create_source_db "${SOURCE_DB_NAME}" + elif [[ "${SOURCE_DB_TYPE}" == "oracle" ]]; then + create_source_db "${SOURCE_DB_SCHEMA}" + else + echo "ERROR: Unsupported SOURCE_DB_TYPE: ${SOURCE_DB_TYPE}" + exit 1 + fi + else + echo "Skipping database creation as SKIP_DB_CREATION is set to true." + fi ./init-db step "Grant source database user permissions" @@ -168,6 +184,11 @@ main() { expected_file="${TEST_DIR}/export_data_status-report.json" actual_file="${EXPORT_DIR}/reports/export-data-status-report.json" + if [ "${EXPORT_TABLE_LIST}" != "" ] + then + expected_file="${TEST_DIR}/export-data-status-with-table-list-report.json" + fi + step "Verify export-data-status report" verify_report ${expected_file} ${actual_file} @@ -185,7 +206,7 @@ main() { step "Clean up" ./cleanup-db - rm -rf "${EXPORT_DIR}/*" + rm -rf "${EXPORT_DIR}" run_ysql yugabyte "DROP DATABASE IF EXISTS ${TARGET_DB_NAME};" } diff --git a/migtests/scripts/run-validate-assessment-report.sh b/migtests/scripts/run-validate-assessment-report.sh index 5cdac1d392..75e3d8875c 100755 --- a/migtests/scripts/run-validate-assessment-report.sh +++ b/migtests/scripts/run-validate-assessment-report.sh @@ -94,7 +94,7 @@ main() { step "Clean up" ./cleanup-db - rm -rf "${EXPORT_DIR}/*" + rm -rf "${EXPORT_DIR}" } main diff --git a/migtests/scripts/run-validate-bulk-assessment-report.sh b/migtests/scripts/run-validate-bulk-assessment-report.sh index 05b43c87ab..ac67773b1d 100755 --- a/migtests/scripts/run-validate-bulk-assessment-report.sh +++ b/migtests/scripts/run-validate-bulk-assessment-report.sh @@ -54,6 +54,7 @@ main() { echo "Assigning permissions to the export-dir to execute init-db, cleanup-db scripts" chmod +x ${TEST_DIR}/init-db + chmod +x ${TEST_DIR}/cleanup-db step "START: ${TEST_NAME}" print_env @@ -109,6 +110,8 @@ main() { fi step "Clean up" + ./cleanup-db + rm -rf 
"${BULK_ASSESSMENT_DIR}" } diff --git a/migtests/scripts/yugabytedb/env.sh b/migtests/scripts/yugabytedb/env.sh index a288b14ffc..10b9e70e5f 100644 --- a/migtests/scripts/yugabytedb/env.sh +++ b/migtests/scripts/yugabytedb/env.sh @@ -7,7 +7,10 @@ export TARGET_DB_ADMIN_PASSWORD=${TARGET_DB_ADMIN_PASSWORD:-''} export TARGET_DB_SCHEMA=${TARGET_DB_SCHEMA:-'public'} # The PG driver, used to connect to YB, is case-sensitive about database name. -if [ "${TARGET_DB_NAME}" == "" ] -then - export TARGET_DB_NAME=`echo ${SOURCE_DB_NAME} | tr [A-Z] [a-z]` +if [ "${TARGET_DB_NAME}" == "" ]; then + if [ "${SOURCE_DB_TYPE}" == "oracle" ]; then + export TARGET_DB_NAME=$(echo ${SOURCE_DB_SCHEMA} | tr [A-Z] [a-z]) + else + export TARGET_DB_NAME=$(echo ${SOURCE_DB_NAME} | tr [A-Z] [a-z]) + fi fi diff --git a/migtests/setup/yb-docker-compose.yaml b/migtests/setup/yb-docker-compose.yaml deleted file mode 100644 index e8c43262cd..0000000000 --- a/migtests/setup/yb-docker-compose.yaml +++ /dev/null @@ -1,46 +0,0 @@ -version: '2.1' - -volumes: - yb-master-data-1: - yb-tserver-data-1: - -services: - yb-master: - image: yugabytedb/yugabyte:${VERSION} - container_name: yb-master-n1 - volumes: - - yb-master-data-1:/mnt/master - command: [ "/home/yugabyte/bin/yb-master", - "--fs_data_dirs=/mnt/master", - "--master_addresses=yb-master-n1:7100", - "--rpc_bind_addresses=yb-master-n1:7100", - "--replication_factor=1"] - ports: - - "7000:7000" - - "7100:7100" - environment: - SERVICE_7000_NAME: yb-master - - yb-tserver: - image: yugabytedb/yugabyte:${VERSION} - container_name: yb-tserver-n1 - volumes: - - yb-tserver-data-1:/mnt/tserver - command: [ "/home/yugabyte/bin/yb-tserver", - "--fs_data_dirs=/mnt/tserver", - "--start_pgsql_proxy", - "--rpc_bind_addresses=yb-tserver-n1:9100", - "--tserver_master_addrs=yb-master-n1:7100"] - ports: - - "9042:9042" - - "5433:5433" - - "9000:9000" - - "9100:9100" - environment: - SERVICE_5433_NAME: ysql - SERVICE_9042_NAME: ycql - SERVICE_6379_NAME: yedis - 
SERVICE_9000_NAME: yb-tserver - depends_on: - - yb-master - diff --git a/migtests/tests/analyze-schema/dummy-export-dir/schema/collations/collation.sql b/migtests/tests/analyze-schema/dummy-export-dir/schema/collations/collation.sql index ed16fb0aa4..3b2898b004 100644 --- a/migtests/tests/analyze-schema/dummy-export-dir/schema/collations/collation.sql +++ b/migtests/tests/analyze-schema/dummy-export-dir/schema/collations/collation.sql @@ -1,3 +1,9 @@ --dropping multiple object -DROP COLLATION IF EXISTS coll1,coll2,coll3; \ No newline at end of file +DROP COLLATION IF EXISTS coll1,coll2,coll3; + +CREATE COLLATION special1 (provider = icu, locale = 'en@colCaseFirst=upper;colReorder=grek-latn', deterministic = true); + +CREATE COLLATION ignore_accents (provider = icu, locale = 'und-u-ks-level1-kc-true', deterministic = false); + + CREATE COLLATION schema2.upperfirst (provider = icu, locale = 'en-u-kf-upper'); \ No newline at end of file diff --git a/migtests/tests/analyze-schema/dummy-export-dir/schema/functions/function.sql b/migtests/tests/analyze-schema/dummy-export-dir/schema/functions/function.sql new file mode 100644 index 0000000000..fc5c894f6d --- /dev/null +++ b/migtests/tests/analyze-schema/dummy-export-dir/schema/functions/function.sql @@ -0,0 +1,176 @@ +CREATE OR REPLACE FUNCTION create_and_populate_tables(table_prefix TEXT, partitions INT) +RETURNS VOID +LANGUAGE plpgsql +AS $$ +DECLARE + i INT; + partition_table TEXT; +BEGIN + -- Loop to create multiple partition tables + FOR i IN 1..partitions LOOP + partition_table := table_prefix || '_part_' || i; + + -- Dynamic SQL to create each partition table + EXECUTE format(' + CREATE TABLE IF NOT EXISTS %I ( + id SERIAL PRIMARY KEY, + name TEXT, + amount NUMERIC + )', partition_table); + RAISE NOTICE 'Table % created', partition_table; + + -- Dynamic SQL to insert data into each partition table + EXECUTE format(' + INSERT INTO %I (name, amount) + SELECT name, amount + FROM source_data + WHERE id %% %L = %L', 
partition_table, partitions, i - 1); + RAISE NOTICE 'Data inserted into table %', partition_table; + END LOOP; + + PERFORM pg_advisory_lock(sender_id); + PERFORM pg_advisory_lock(receiver_id); + + -- Check if the sender has enough balance + IF (SELECT balance FROM accounts WHERE account_id = sender_id) < transfer_amount THEN + RAISE EXCEPTION 'Insufficient funds'; + -- Deduct the amount from the sender's account SOME DUMMY code to understand nested if structure + UPDATE accounts + SET balance = balance - transfer_amount + WHERE account_id = sender_id; + + -- Add the amount to the receiver's account + UPDATE accounts + SET balance = balance + transfer_amount + WHERE account_id = receiver_id; + IF (SELECT balance FROM accounts WHERE account_id = sender_id) < transfer_amount THEN + -- Release the advisory locks (optional, as they will be released at the end of the transaction) + PERFORM pg_advisory_unlock(sender_id); + PERFORM pg_advisory_unlock(receiver_id); + END IF; + END IF; + + -- Deduct the amount from the sender's account + UPDATE accounts + SET balance = balance - transfer_amount + WHERE account_id = sender_id; + + -- Add the amount to the receiver's account + UPDATE accounts + SET balance = balance + transfer_amount + WHERE account_id = receiver_id; + + -- Commit the transaction + COMMIT; + + -- Release the advisory locks (optional, as they will be released at the end of the transaction) + PERFORM pg_advisory_unlock(sender_id); + PERFORM pg_advisory_unlock(receiver_id); + + -- Conditional logic + IF balance >= withdrawal THEN + RAISE NOTICE 'Sufficient balance, processing withdrawal.'; + -- Add the amount to the receiver's account + UPDATE accounts SET balance = balance + amount WHERE account_id = receiver; + ELSIF balance > 0 AND balance < withdrawal THEN + RAISE NOTICE 'Insufficient balance, consider reducing the amount.'; + -- Add the amount to the receiver's account + UPDATE accounts SET balance = balance + amount WHERE account_id = receiver; + ELSE + -- 
Add the amount to the receiver's account + UPDATE accounts SET balance = balance + amount WHERE account_id = receiver; + RAISE NOTICE 'No funds available.'; + END IF; + + SELECT id, xpath('/person/name/text()', data) AS name FROM test_xml_type; + + SELECT * FROM employees e WHERE e.xmax = (SELECT MAX(xmax) FROM employees WHERE department = e.department); + +END; +$$; + +CREATE FUNCTION public.get_employeee_salary(emp_id integer) RETURNS numeric + LANGUAGE plpgsql + AS $$ +DECLARE + emp_salary employees.salary%TYPE; -- Declare a variable with the same type as employees.salary +BEGIN + SELECT salary INTO emp_salary + FROM employees + WHERE employee_id = emp_id; + RETURN emp_salary; +END; +$$; + +CREATE OR REPLACE FUNCTION calculate_tax(salary_amount NUMERIC) RETURNS NUMERIC AS $$ +DECLARE + tax_rate employees.tax_rate%TYPE; -- Inherits type from employees.tax_rate column + tax_amount NUMERIC; +BEGIN + -- Assign a value to the variable + SELECT tax_rate INTO tax_rate FROM employees WHERE id = 1; + + -- Use the variable in a calculation + tax_amount := salary_amount * tax_rate; + RETURN tax_amount; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION log_salary_change() RETURNS TRIGGER AS $$ +DECLARE + old_salary employees.salary%TYPE; -- Matches the type of the salary column + new_salary employees.salary%TYPE; +BEGIN + old_salary := OLD.salary; + new_salary := NEW.salary; + + IF new_salary <> old_salary THEN + INSERT INTO salary_log(employee_id, old_salary, new_salary, changed_at) + VALUES (NEW.id, old_salary, new_salary, now()); + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER salary_update_trigger +AFTER UPDATE OF salary ON employees +FOR EACH ROW EXECUTE FUNCTION log_salary_change(); + +CREATE OR REPLACE FUNCTION get_employee_details(emp_id employees.id%Type) +RETURNS public.employees.name%Type AS $$ +DECLARE + employee_name employees.name%TYPE; +BEGIN + SELECT name INTO employee_name FROM employees WHERE id = emp_id; + RETURN 
employee_name; +END; +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION list_high_earners(threshold NUMERIC) RETURNS VOID AS $$ +DECLARE + emp_name employees.name%TYPE; + emp_salary employees.salary%TYPE; +BEGIN + FOR emp_name, emp_salary IN + SELECT name, salary FROM employees WHERE salary > threshold + LOOP + RAISE NOTICE 'Employee: %, Salary: %', emp_name, emp_salary; + END LOOP; +END; +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION copy_high_earners(threshold NUMERIC) RETURNS VOID AS $$ +DECLARE + temp_salary employees.salary%TYPE; +BEGIN + CREATE TEMP TABLE temp_high_earners AS + SELECT * FROM employees WHERE salary > threshold; + + FOR temp_salary IN SELECT salary FROM temp_high_earners LOOP + RAISE NOTICE 'High earner salary: %', temp_salary; + END LOOP; +END; +$$ LANGUAGE plpgsql; \ No newline at end of file diff --git a/migtests/tests/analyze-schema/dummy-export-dir/schema/mviews/mview.sql b/migtests/tests/analyze-schema/dummy-export-dir/schema/mviews/mview.sql index 45097666b7..50b78580c1 100644 --- a/migtests/tests/analyze-schema/dummy-export-dir/schema/mviews/mview.sql +++ b/migtests/tests/analyze-schema/dummy-export-dir/schema/mviews/mview.sql @@ -2,4 +2,15 @@ CREATE MATERIALIZED VIEW test AS ( select x , JSON_ARRAYAGG(trunc(b, 2) order by t desc) as agg FROM test1 where t = '1DAY' group by x - ); \ No newline at end of file + ); + +CREATE MATERIALIZED VIEW public.sample_data_view AS + SELECT sample_data.id, + sample_data.name, + sample_data.description, + XMLFOREST(sample_data.name AS name, sample_data.description AS description) AS xml_data, + pg_try_advisory_lock((sample_data.id)::bigint) AS lock_acquired, + sample_data.ctid AS row_ctid, + sample_data.xmin AS xmin_value + FROM public.sample_data + WITH NO DATA; \ No newline at end of file diff --git a/migtests/tests/analyze-schema/dummy-export-dir/schema/procedures/procedure.sql b/migtests/tests/analyze-schema/dummy-export-dir/schema/procedures/procedure.sql index 900f71e8b4..cdb2bdcff6 
100644 --- a/migtests/tests/analyze-schema/dummy-export-dir/schema/procedures/procedure.sql +++ b/migtests/tests/analyze-schema/dummy-export-dir/schema/procedures/procedure.sql @@ -112,3 +112,64 @@ $body$ LANGUAGE PLPGSQL SECURITY DEFINER ; + +CREATE OR REPLACE PROCEDURE add_employee( + emp_name VARCHAR, + emp_age INT +) +LANGUAGE plpgsql +AS $$ +BEGIN + SELECT id, first_name FROM employees WHERE pg_try_advisory_lock(300) IS TRUE; + + -- Insert a new record into the employees table + INSERT INTO employees(name, age) + VALUES (emp_name, emp_age); + + SELECT e.id, e.name, + ROW_NUMBER() OVER (ORDER BY e.ctid) AS row_num + FROM employees e; + + SELECT e.id, x.employee_xml + FROM employees e + JOIN ( + SELECT xmlelement(name "employee", xmlattributes(e.id AS "id"), e.name) AS employee_xml + FROM employees e + ) x ON x.employee_xml IS NOT NULL + WHERE xmlexists('//employee[name="John Doe"]' PASSING BY REF x.employee_xml); + + SELECT e.id, + CASE + WHEN e.salary > 100000 THEN pg_advisory_lock(e.id) + ELSE pg_advisory_unlock(e.id) + END AS lock_status + FROM employees e; + + -- Optional: Log a message + RAISE NOTICE 'Employee % of age % added successfully.', emp_name, emp_age; +END; +$$; + +CREATE OR REPLACE PROCEDURE update_salary(emp_id INT, increment NUMERIC) AS $$ +DECLARE + current_salary employees.salary%TYPE; -- Matches the type of the salary column +BEGIN + SELECT salary INTO current_salary FROM employees WHERE id = emp_id; + + IF current_salary IS NULL THEN + RAISE NOTICE 'Employee ID % does not exist.', emp_id; + ELSE + UPDATE employees SET salary = current_salary + increment WHERE id = emp_id; + END IF; +END; +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE PROCEDURE get_employee_details_proc(emp_id employees.id%Type, salary employees.salary%TYPE, tax_rate numeric) AS $$ +DECLARE + employee_name employees.name%TYPE; +BEGIN + SELECT name INTO employee_name FROM employees e WHERE e.id = emp_id and e.salary = salary and e.tax_rate = tax_rate; + +END; +$$ LANGUAGE 
plpgsql; \ No newline at end of file diff --git a/migtests/tests/analyze-schema/dummy-export-dir/schema/tables/INDEXES_table.sql b/migtests/tests/analyze-schema/dummy-export-dir/schema/tables/INDEXES_table.sql index f3496e6089..3457b1c67d 100644 --- a/migtests/tests/analyze-schema/dummy-export-dir/schema/tables/INDEXES_table.sql +++ b/migtests/tests/analyze-schema/dummy-export-dir/schema/tables/INDEXES_table.sql @@ -81,6 +81,21 @@ CREATE INDEX idx14 on combined_tbl (bitt); CREATE INDEX idx15 on combined_tbl (bittv); +CREATE INDEX idx1 on combined_tbl1 (d); + +CREATE INDEX idx2 on combined_tbl1 (t); + +CREATE INDEX idx3 on combined_tbl1 (tz); + +CREATE INDEX idx4 on combined_tbl1 (n); + +CREATE INDEX idx5 on combined_tbl1 (i4); + +CREATE INDEX idx6 on combined_tbl1 (i8); + +CREATE INDEX idx7 on combined_tbl1 (inym); + +CREATE INDEX idx8 on combined_tbl1 (inds); CREATE INDEX idx_udt on test_udt(home_address); @@ -88,4 +103,9 @@ CREATE INDEX idx_udt1 on test_udt(home_address1); CREATE INDEX idx_enum on test_udt(some_field); -CREATE INDEX "idx&_enum2" on test_udt((some_field::non_public.enum_test)); \ No newline at end of file +CREATE INDEX "idx&_enum2" on test_udt((some_field::non_public.enum_test)); + +-- Create a unique index on a column with NULLs with the NULLS NOT DISTINCT option +CREATE UNIQUE INDEX users_unique_nulls_not_distinct_index_email + ON users_unique_nulls_not_distinct_index (email) + NULLS NOT DISTINCT; diff --git a/migtests/tests/analyze-schema/dummy-export-dir/schema/tables/table.sql b/migtests/tests/analyze-schema/dummy-export-dir/schema/tables/table.sql index e677a36f2d..48908efa2d 100755 --- a/migtests/tests/analyze-schema/dummy-export-dir/schema/tables/table.sql +++ b/migtests/tests/analyze-schema/dummy-export-dir/schema/tables/table.sql @@ -27,7 +27,7 @@ CREATE TABLE sales ( -- cases for multi column list partition, to be reported during analyze-schema CREATE TABLE test_1 ( - id numeric NOT NULL, + id numeric NOT NULL REFERENCES 
sales_data(sales_id), country_code varchar(3), record_type varchar(5), descriptions varchar(50), @@ -321,7 +321,7 @@ create table combined_tbl ( c cidr, ci circle, b box, - j json, + j json UNIQUE, l line, ls lseg, maddr macaddr, @@ -332,9 +332,29 @@ create table combined_tbl ( p2 polygon, id1 txid_snapshot, bitt bit (13), - bittv bit varying(15) + bittv bit varying(15), + CONSTRAINT pk PRIMARY KEY (id, maddr8) +); + +ALTER TABLE combined_tbl + ADD CONSTRAINT combined_tbl_unique UNIQUE(id, bitt); + +CREATE TABLE combined_tbl1( + id int, + t tsrange, + d daterange, + tz tstzrange, + n numrange, + i4 int4range UNIQUE, + i8 int8range, + inym INTERVAL YEAR TO MONTH, + inds INTERVAL DAY TO SECOND(9), + PRIMARY KEY(id, t, n) ); +ALTER TABLE combined_tbl1 + ADD CONSTRAINT combined_tbl1_unique UNIQUE(id, d); + CREATE UNLOGGED TABLE tbl_unlogged (id int, val text); CREATE TABLE test_udt ( @@ -342,17 +362,106 @@ CREATE TABLE test_udt ( employee_name VARCHAR(100), home_address address_type, some_field enum_test, - home_address1 non_public.address_type1 + home_address1 non_public.address_type1, + scalar_column TEXT CHECK (scalar_column IS JSON SCALAR) ); CREATE TABLE test_arr_enum ( id int, arr text[], - arr_enum enum_test[] + arr_enum enum_test[], + object_column TEXT CHECK (object_column IS JSON OBJECT) ); CREATE TABLE public.locations ( id integer NOT NULL, name character varying(100), - geom geometry(Point,4326) - ); \ No newline at end of file + geom geometry(Point,4326), + array_column TEXT CHECK (array_column IS JSON ARRAY) + ); + + CREATE TABLE public.xml_data_example ( + id SERIAL PRIMARY KEY, + name VARCHAR(255), + description XML DEFAULT xmlparse(document 'Default Product100.00Electronics'), + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + unique_keys_column TEXT CHECK (unique_keys_column IS JSON WITH UNIQUE KEYS) +); + +CREATE TABLE image (title text, raster lo); + +-- IS JSON Predicate +CREATE TABLE public.json_data ( + id SERIAL PRIMARY KEY, + data_column 
TEXT NOT NULL CHECK (data_column IS JSON) +); +CREATE TABLE employees (id INT PRIMARY KEY, salary INT); +-- create table with multirange data types + +-- Create tables with primary keys directly +CREATE TABLE bigint_multirange_table ( + id integer PRIMARY KEY, + value_ranges int8multirange +); + +CREATE TABLE date_multirange_table ( + id integer PRIMARY KEY, + project_dates datemultirange +); + +CREATE TABLE int_multirange_table ( + id integer PRIMARY KEY, + value_ranges int4multirange +); + +CREATE TABLE numeric_multirange_table ( + id integer PRIMARY KEY, + price_ranges nummultirange +); + +CREATE TABLE timestamp_multirange_table ( + id integer PRIMARY KEY, + event_times tsmultirange +); + +CREATE TABLE timestamptz_multirange_table ( + id integer PRIMARY KEY, + global_event_times tstzmultirange +); + +-- Testing tables with unique nulls not distinct constraints + +-- Control case +CREATE TABLE users_unique_nulls_distinct ( + id SERIAL PRIMARY KEY, + email TEXT, + UNIQUE (email) +); + +CREATE TABLE users_unique_nulls_not_distinct ( + id SERIAL PRIMARY KEY, + email TEXT, + UNIQUE NULLS NOT DISTINCT (email) +); + +CREATE TABLE sales_unique_nulls_not_distinct ( + store_id INT, + product_id INT, + sale_date DATE, + UNIQUE NULLS NOT DISTINCT (store_id, product_id, sale_date) +); + +CREATE TABLE sales_unique_nulls_not_distinct_alter ( + store_id INT, + product_id INT, + sale_date DATE +); + +ALTER TABLE sales_unique_nulls_not_distinct_alter + ADD CONSTRAINT sales_unique_nulls_not_distinct_alter_unique UNIQUE NULLS NOT DISTINCT (store_id, product_id, sale_date); + +-- Create a unique index on a column with NULLs with the NULLS NOT DISTINCT option +CREATE TABLE users_unique_nulls_not_distinct_index ( + id INTEGER PRIMARY KEY, + email TEXT +); diff --git a/migtests/tests/analyze-schema/dummy-export-dir/schema/views/view.sql b/migtests/tests/analyze-schema/dummy-export-dir/schema/views/view.sql index 8dba0d3d11..1fc6a82cbb 100644 --- 
a/migtests/tests/analyze-schema/dummy-export-dir/schema/views/view.sql +++ b/migtests/tests/analyze-schema/dummy-export-dir/schema/views/view.sql @@ -16,7 +16,36 @@ CREATE OR REPLACE view test AS ( FROM test1 where t = '1DAY' group by x ); - +CREATE VIEW view_name AS SELECT * from test_arr_enum; --Unsupported PG Syntax --For this case we will have two issues reported one by regex and other by Unsupported PG syntax with error msg -ALTER VIEW view_name TO select * from test; \ No newline at end of file +ALTER VIEW view_name TO select * from test; + +CREATE VIEW public.orders_view AS + SELECT orders.order_id, + orders.customer_name, + orders.product_name, + orders.quantity, + orders.price, + XMLELEMENT(NAME "OrderDetails", XMLELEMENT(NAME "Customer", orders.customer_name), XMLELEMENT(NAME "Product", orders.product_name), XMLELEMENT(NAME "Quantity", orders.quantity), XMLELEMENT(NAME "TotalPrice", (orders.price * (orders.quantity)::numeric))) AS order_xml, + XMLCONCAT(XMLELEMENT(NAME "Customer", orders.customer_name), XMLELEMENT(NAME "Product", orders.product_name)) AS summary_xml, + pg_try_advisory_lock((hashtext((orders.customer_name || orders.product_name)))::bigint) AS lock_acquired, + orders.ctid AS row_ctid, + orders.xmin AS transaction_id + FROM public.orders; + +CREATE VIEW top_employees_view AS SELECT * FROM ( + SELECT * FROM employees + ORDER BY salary DESC + FETCH FIRST 2 ROWS WITH TIES + ) AS top_employees; +CREATE VIEW public.my_films_view AS +SELECT jt.* FROM + my_films, + JSON_TABLE ( js, '$.favorites[*]' + COLUMNS ( + id FOR ORDINALITY, + kind text PATH '$.kind', + NESTED PATH '$.films[*]' COLUMNS ( + title text FORMAT JSON PATH '$.title' OMIT QUOTES, + director text PATH '$.director' KEEP QUOTES))) AS jt; diff --git a/migtests/tests/analyze-schema/expected_issues.json b/migtests/tests/analyze-schema/expected_issues.json index e3eacf1d81..8bb85fa16a 100644 --- a/migtests/tests/analyze-schema/expected_issues.json +++ 
b/migtests/tests/analyze-schema/expected_issues.json @@ -7,7 +7,8 @@ "SqlStatement": "CREATE INDEX film_fulltext_idx ON public.film USING gist (fulltext);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1337" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1337", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -16,7 +17,151 @@ "Reason": "JSON_ARRAYAGG() function is not available in YugabyteDB", "SqlStatement": "CREATE MATERIALIZED VIEW test AS (\n select x , JSON_ARRAYAGG(trunc(b, 2) order by t desc) as agg\n FROM test1\n where t = '1DAY' group by x\n );", "Suggestion": "Rename the function to YugabyteDB's equivalent JSON_AGG()", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1542" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1542", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "public.my_films_view", + "Reason": "Json Query Functions", + "SqlStatement": "CREATE VIEW public.my_films_view AS\nSELECT jt.* FROM\n my_films,\n JSON_TABLE ( js, '$.favorites[*]'\n COLUMNS (\n id FOR ORDINALITY,\n kind text PATH '$.kind',\n NESTED PATH '$.films[*]' COLUMNS (\n title text FORMAT JSON PATH '$.title' OMIT QUOTES,\n director text PATH '$.director' KEEP QUOTES))) AS jt;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "public.json_data", + "Reason": "Json Type Predicate", + "SqlStatement": "CREATE TABLE public.json_data (\n id SERIAL PRIMARY KEY,\n data_column TEXT NOT NULL CHECK (data_column IS 
JSON)\n);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "test_arr_enum", + "Reason": "Json Type Predicate", + "SqlStatement": "CREATE TABLE test_arr_enum (\n\tid int,\n\tarr text[],\n\tarr_enum enum_test[],\n object_column TEXT CHECK (object_column IS JSON OBJECT)\n);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "test_udt", + "Reason": "Json Type Predicate", + "SqlStatement": "CREATE TABLE test_udt (\n\temployee_id SERIAL PRIMARY KEY,\n\temployee_name VARCHAR(100),\n\thome_address address_type,\n\tsome_field enum_test,\n\thome_address1 non_public.address_type1,\n scalar_column TEXT CHECK (scalar_column IS JSON SCALAR)\n);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "public.xml_data_example", + "Reason": "Json Type Predicate", + "SqlStatement": " CREATE TABLE public.xml_data_example (\n id SERIAL PRIMARY KEY,\n name VARCHAR(255),\n description XML DEFAULT xmlparse(document '\u003cproduct\u003e\u003cname\u003eDefault Product\u003c/name\u003e\u003cprice\u003e100.00\u003c/price\u003e\u003ccategory\u003eElectronics\u003c/category\u003e\u003c/product\u003e'),\n created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,\n 
unique_keys_column TEXT CHECK (unique_keys_column IS JSON WITH UNIQUE KEYS)\n);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "public.locations", + "Reason": "Json Type Predicate", + "SqlStatement": "CREATE TABLE public.locations (\n id integer NOT NULL,\n name character varying(100),\n geom geometry(Point,4326),\n array_column TEXT CHECK (array_column IS JSON ARRAY)\n );", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "test", + "Reason": "Json Constructor Functions", + "SqlStatement": "CREATE OR REPLACE view test AS (\n select x , JSON_ARRAYAGG(trunc(b, 2) order by t desc) as agg\n FROM test1\n where t = '1DAY' group by x\n );", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "test_1, constraint: (test_1_id_fkey)", + "Reason": "Foreign key constraint references partitioned table", + "SqlStatement": "CREATE TABLE test_1 (\n\tid numeric NOT NULL REFERENCES sales_data(sales_id),\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50),\n\tPRIMARY KEY (id)\n) PARTITION BY LIST (country_code, record_type) ;", + "Suggestion": "No workaround available ", + "GH": 
"https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "MVIEW", + "ObjectName": "test", + "Reason": "Json Constructor Functions", + "SqlStatement": "CREATE MATERIALIZED VIEW test AS (\n select x , JSON_ARRAYAGG(trunc(b, 2) order by t desc) as agg\n FROM test1\n where t = '1DAY' group by x\n );", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "COLLATION", + "ObjectName": "special1", + "Reason": "Deterministic attribute in collation", + "SqlStatement": "CREATE COLLATION special1 (provider = icu, locale = 'en@colCaseFirst=upper;colReorder=grek-latn', deterministic = true);", + "Suggestion": "This feature is not supported in YugabyteDB yet", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "COLLATION", + "ObjectName": "ignore_accents", + "Reason": "Deterministic attribute in collation", + "SqlStatement": "CREATE COLLATION ignore_accents (provider = icu, locale = 'und-u-ks-level1-kc-true', deterministic = false);", + "Suggestion": "This feature is not supported in YugabyteDB yet", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": 
"TABLE", + "ObjectName": "public.xml_data_example", + "Reason": "XML Functions", + "SqlStatement": " CREATE TABLE public.xml_data_example (\n id SERIAL PRIMARY KEY,\n name VARCHAR(255),\n description XML DEFAULT xmlparse(document '\u003cproduct\u003e\u003cname\u003eDefault Product\u003c/name\u003e\u003cprice\u003e100.00\u003c/price\u003e\u003ccategory\u003eElectronics\u003c/category\u003e\u003c/product\u003e'),\n created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,\n unique_keys_column TEXT CHECK (unique_keys_column IS JSON WITH UNIQUE KEYS)\n);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_datatypes", + "ObjectType": "TABLE", + "ObjectName": "public.xml_data_example", + "Reason": "Unsupported datatype - xml on column - description", + "SqlStatement": " CREATE TABLE public.xml_data_example (\n id SERIAL PRIMARY KEY,\n name VARCHAR(255),\n description XML DEFAULT xmlparse(document '\u003cproduct\u003e\u003cname\u003eDefault Product\u003c/name\u003e\u003cprice\u003e100.00\u003c/price\u003e\u003ccategory\u003eElectronics\u003c/category\u003e\u003c/product\u003e'),\n created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,\n unique_keys_column TEXT CHECK (unique_keys_column IS JSON WITH UNIQUE KEYS)\n);", + "Suggestion": "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. 
Refer link for more details.", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -25,8 +170,9 @@ "Reason": "INDEX on column 'cidr' not yet supported", "SqlStatement": "CREATE index idx1 on combined_tbl (c);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -35,8 +181,9 @@ "Reason": "INDEX on column 'circle' not yet supported", "SqlStatement": "CREATE index idx2 on combined_tbl (ci);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -45,8 +192,9 @@ "Reason": "INDEX on column 'box' not yet supported", "SqlStatement": "CREATE index idx3 on combined_tbl (b);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -55,8 +203,9 @@ "Reason": "INDEX on column 'json' not yet supported", "SqlStatement": "CREATE index idx4 on combined_tbl (j);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -65,8 +214,9 @@ "Reason": "INDEX on column 'line' not yet supported", "SqlStatement": "CREATE index idx5 on combined_tbl (l);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -75,8 +225,9 @@ "Reason": "INDEX on column 'lseg' not yet supported", "SqlStatement": "CREATE index idx6 on combined_tbl (ls);", "Suggestion": "Refer to the docs link for the workaround", - "GH": 
"https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -85,8 +236,9 @@ "Reason": "INDEX on column 'macaddr' not yet supported", "SqlStatement": "CREATE index idx7 on combined_tbl (maddr);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -95,8 +247,9 @@ "Reason": "INDEX on column 'macaddr8' not yet supported", "SqlStatement": "CREATE index idx8 on combined_tbl (maddr8);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -105,8 +258,9 @@ "Reason": "INDEX on column 'point' not yet supported", "SqlStatement": "CREATE index idx9 on combined_tbl (p);", 
"Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -115,8 +269,9 @@ "Reason": "INDEX on column 'pg_lsn' not yet supported", "SqlStatement": "CREATE index idx10 on combined_tbl (lsn);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -125,8 +280,9 @@ "Reason": "INDEX on column 'path' not yet supported", "SqlStatement": "CREATE index idx11 on combined_tbl (p1);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -135,8 +291,9 @@ "Reason": "INDEX on column 'polygon' not yet 
supported", "SqlStatement": "CREATE index idx12 on combined_tbl (p2);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -145,8 +302,9 @@ "Reason": "INDEX on column 'txid_snapshot' not yet supported", "SqlStatement": "CREATE index idx13 on combined_tbl (id1);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -155,8 +313,9 @@ "Reason": "INDEX on column 'bit' not yet supported", "SqlStatement": "CREATE INDEX idx14 on combined_tbl (bitt);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", @@ -165,8 +324,9 @@ "Reason": "INDEX on column 'varbit' not yet supported", "SqlStatement": "CREATE INDEX idx15 on combined_tbl (bittv);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -176,7 +336,8 @@ "SqlStatement": "CREATE TABLE public.documents (\n id integer NOT NULL,\n title_tsvector tsvector,\n content_tsvector tsvector,\n\tlist_of_sections text[]\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -186,7 +347,8 @@ "SqlStatement": "CREATE TABLE public.documents (\n id integer NOT NULL,\n title_tsvector tsvector,\n content_tsvector tsvector,\n\tlist_of_sections text[]\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -196,37 +358,41 @@ 
"SqlStatement": "CREATE TABLE public.ts_query_table (\n id int generated by default as identity,\n query tsquery\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "test_udt", "Reason": "Unsupported datatype for Live migration with fall-forward/fallback - address_type on column - home_address", - "SqlStatement": "CREATE TABLE test_udt (\n\temployee_id SERIAL PRIMARY KEY,\n\temployee_name VARCHAR(100),\n\thome_address address_type,\n\tsome_field enum_test,\n\thome_address1 non_public.address_type1\n);", + "SqlStatement": "CREATE TABLE test_udt (\n\temployee_id SERIAL PRIMARY KEY,\n\temployee_name VARCHAR(100),\n\thome_address address_type,\n\tsome_field enum_test,\n\thome_address1 non_public.address_type1,\n scalar_column TEXT CHECK (scalar_column IS JSON SCALAR)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "test_arr_enum", "Reason": "Unsupported datatype for Live migration with fall-forward/fallback - enum_test[] on column - arr_enum", - "SqlStatement": "CREATE TABLE test_arr_enum (\n\tid int,\n\tarr text[],\n\tarr_enum enum_test[]\n);", + "SqlStatement": "CREATE TABLE test_arr_enum (\n\tid int,\n\tarr 
text[],\n\tarr_enum enum_test[],\n object_column TEXT CHECK (object_column IS JSON OBJECT)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "test_udt", "Reason": "Unsupported datatype for Live migration with fall-forward/fallback - non_public.address_type1 on column - home_address1", - "SqlStatement": "CREATE TABLE test_udt (\n\temployee_id SERIAL PRIMARY KEY,\n\temployee_name VARCHAR(100),\n\thome_address address_type,\n\tsome_field enum_test,\n\thome_address1 non_public.address_type1\n);", + "SqlStatement": "CREATE TABLE test_udt (\n\temployee_id SERIAL PRIMARY KEY,\n\temployee_name VARCHAR(100),\n\thome_address address_type,\n\tsome_field enum_test,\n\thome_address1 non_public.address_type1,\n scalar_column TEXT CHECK (scalar_column IS JSON SCALAR)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -235,8 +401,9 @@ "Reason": "INDEX on column 'user_defined_type' not yet supported", "SqlStatement": "CREATE INDEX idx_udt on test_udt(home_address);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -245,8 +412,9 @@ "Reason": "INDEX on column 'user_defined_type' not yet supported", "SqlStatement": "CREATE INDEX idx_udt1 on test_udt(home_address1);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -255,28 +423,31 @@ "Reason": "INDEX on column 'user_defined_type' not yet supported", "SqlStatement": "CREATE INDEX \"idx\u0026_enum2\" on test_udt((some_field::non_public.enum_test));", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "abc", + "ObjectName": "abc ON public.example", "Reason": "Storage parameters are not 
supported yet.", - "SqlStatement": "CREATE INDEX abc ON public.example USING btree (new_id) WITH (fillfactor='70'); ", + "SqlStatement": "CREATE INDEX abc ON public.example USING btree (new_id) WITH (fillfactor='70');", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", "Suggestion": "Remove the storage parameters from the DDL", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "abc", + "ObjectName": "abc ON schema2.example", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX abc ON schema2.example USING btree (new_id) WITH (fillfactor='70'); ", + "SqlStatement": "CREATE INDEX abc ON schema2.example USING btree (new_id) WITH (fillfactor='70');", "Suggestion": "Remove the storage parameters from the DDL", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -286,7 +457,8 @@ "SqlStatement": "CREATE OR REPLACE PROCEDURE foo (p_id integer) AS $body$\nBEGIN\n drop temporary table if exists temp;\n create temporary table temp(id int, name text);\n insert into temp(id,name) select id,p_name from bar where p_id=id;\n select name from temp;\nend;\n$body$\nLANGUAGE PLPGSQL\nSECURITY DEFINER\n;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/mysql/#drop-temporary-table-statements-are-not-supported", "Suggestion": "remove \"temporary\" and change it to \"drop table\"", - "GH": 
"https://github.com/yugabyte/yb-voyager/issues/705" + "GH": "https://github.com/yugabyte/yb-voyager/issues/705", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -296,7 +468,8 @@ "SqlStatement": "CREATE TABLE sales (\n\tcust_id bigint NOT NULL,\n\tname varchar(40),\n\tstore_id varchar(20) NOT NULL,\n\tbill_no bigint NOT NULL,\n\tbill_date timestamp NOT NULL,\n\tamount decimal(8,2) NOT NULL,\n\tPRIMARY KEY (bill_date)\n) PARTITION BY RANGE (extract(year from date(bill_date))) ;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/mysql/#tables-partitioned-with-expressions-cannot-contain-primary-unique-keys", "Suggestion": "Remove the Constriant from the table definition", - "GH": "https://github.com/yugabyte/yb-voyager/issues/698" + "GH": "https://github.com/yugabyte/yb-voyager/issues/698", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -306,7 +479,8 @@ "SqlStatement": "CREATE TABLE salaries2 (\n\temp_no bigint NOT NULL,\n\tsalary bigint NOT NULL,\n\tfrom_date timestamp NOT NULL,\n\tto_date timestamp NOT NULL,\n\tPRIMARY KEY (emp_no,from_date)\n) PARTITION BY RANGE (extract(epoch from date(from_date))) ;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/mysql/#tables-partitioned-with-expressions-cannot-contain-primary-unique-keys", "Suggestion": "Remove the Constriant from the table definition", - "GH": "https://github.com/yugabyte/yb-voyager/issues/698" + "GH": "https://github.com/yugabyte/yb-voyager/issues/698", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -315,16 +489,8 @@ "Reason": "ALTER VIEW not supported yet.", "SqlStatement": "ALTER VIEW view_name TO select * from test;", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1131" - }, - { - "IssueType": "unsupported_features", - "ObjectType": "VIEW", - "ObjectName": "", - "Reason": "Unsupported PG syntax - 'syntax error at or near \"TO\"'", - 
"SqlStatement": "ALTER VIEW view_name TO select * from test;", - "Suggestion": "Fix the schema as per PG syntax", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1625" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1131", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -333,17 +499,19 @@ "Reason": "ALTER TABLE OF not supported yet.", "SqlStatement": "Alter table only party_profile_part of parent_tbl add constraint party_profile_pk primary key (party_profile_id);", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "TABLE", "ObjectName": "test_1", "Reason": "cannot use \"list\" partition strategy with more than one column", - "SqlStatement": "CREATE TABLE test_1 (\n\tid numeric NOT NULL,\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50),\n\tPRIMARY KEY (id)\n) PARTITION BY LIST (country_code, record_type) ;", + "SqlStatement": "CREATE TABLE test_1 (\n\tid numeric NOT NULL REFERENCES sales_data(sales_id),\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50),\n\tPRIMARY KEY (id)\n) PARTITION BY LIST (country_code, record_type) ;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/mysql/#multi-column-partition-by-list-is-not-supported", "Suggestion": "Make it a single column partition by list or choose other supported Partitioning methods", - "GH": "https://github.com/yugabyte/yb-voyager/issues/699" + "GH": "https://github.com/yugabyte/yb-voyager/issues/699", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -353,7 +521,8 @@ "SqlStatement": "CREATE TABLE test_2 (\n\tid numeric NOT NULL PRIMARY KEY,\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50)\n) PARTITION BY LIST (country_code, record_type) 
;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/mysql/#multi-column-partition-by-list-is-not-supported", "Suggestion": "Make it a single column partition by list or choose other supported Partitioning methods", - "GH": "https://github.com/yugabyte/yb-voyager/issues/699" + "GH": "https://github.com/yugabyte/yb-voyager/issues/699", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -363,7 +532,8 @@ "SqlStatement": "CREATE TABLE test_2 (\n\tid numeric NOT NULL PRIMARY KEY,\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50)\n) PARTITION BY LIST (country_code, record_type) ;", "Suggestion": "Add all Partition columns to Primary Key", "GH": "https://github.com/yugabyte/yb-voyager/issues/578", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -373,7 +543,8 @@ "SqlStatement": "CREATE TABLE test_5 (\n\tid numeric NOT NULL,\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50),\n\tPRIMARY KEY (id)\n) PARTITION BY RANGE (country_code, record_type) ;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns", "Suggestion": "Add all Partition columns to Primary Key", - "GH": "https://github.com/yugabyte/yb-voyager/issues/578" + "GH": "https://github.com/yugabyte/yb-voyager/issues/578", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -383,7 +554,8 @@ "SqlStatement": "CREATE TABLE test_6 (\n\tid numeric NOT NULL,\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50),\n\tPRIMARY KEY (id,country_code)\n) PARTITION BY 
RANGE (country_code, record_type) ;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns", "Suggestion": "Add all Partition columns to Primary Key", - "GH": "https://github.com/yugabyte/yb-voyager/issues/578" + "GH": "https://github.com/yugabyte/yb-voyager/issues/578", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -393,7 +565,8 @@ "SqlStatement": "CREATE TABLE test_7 (\n\tid numeric NOT NULL,\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50),\n\tPRIMARY KEY (id,country_code)\n) PARTITION BY RANGE (descriptions, record_type) ;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns", "Suggestion": "Add all Partition columns to Primary Key", - "GH": "https://github.com/yugabyte/yb-voyager/issues/578" + "GH": "https://github.com/yugabyte/yb-voyager/issues/578", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -403,7 +576,8 @@ "SqlStatement": "CREATE TABLE test_8 (\n\torder_id bigint NOT NULL,\n\torder_date timestamp,\n\torder_mode varchar(8),\n\tcustomer_id integer,\n\torder_mode smallint,\n\torder_total double precision,\n\tsales_rep_id integer,\n\tpromotion_id integer,\n\tPRIMARY KEY (order_id,order_mode,customer_id,order_total,sales_rep_id)\n) PARTITION BY RANGE (promotion_id, order_date, sales_rep_id) ;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns", "Suggestion": "Add all Partition columns to Primary Key", - "GH": "https://github.com/yugabyte/yb-voyager/issues/578" + "GH": "https://github.com/yugabyte/yb-voyager/issues/578", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -413,7 +587,8 @@ "SqlStatement": "CREATE TABLE test_non_pk_multi_column_list (\n\tid numeric NOT NULL PRIMARY 
KEY,\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50)\n) PARTITION BY LIST (country_code, record_type) ;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/mysql/#multi-column-partition-by-list-is-not-supported", "Suggestion": "Make it a single column partition by list or choose other supported Partitioning methods", - "GH": "https://github.com/yugabyte/yb-voyager/issues/699" + "GH": "https://github.com/yugabyte/yb-voyager/issues/699", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -423,7 +598,8 @@ "SqlStatement": "CREATE TABLE test_non_pk_multi_column_list (\n\tid numeric NOT NULL PRIMARY KEY,\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50)\n) PARTITION BY LIST (country_code, record_type) ;", "Suggestion": "Add all Partition columns to Primary Key", "GH": "https://github.com/yugabyte/yb-voyager/issues/578", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -433,7 +609,8 @@ "SqlStatement": "CREATE CONVERSION myconv FOR 'UTF8' TO 'LATIN1' FROM myfunc;", "Suggestion": "Remove it from the exported schema", "GH": "https://github.com/yugabyte/yugabyte-db/issues/10866", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#create-or-alter-conversion-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#create-or-alter-conversion-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -443,7 +620,8 @@ "SqlStatement": "ALTER CONVERSION myconv rename to my_conv_1;", "Suggestion": "Remove it from the exported 
schema", "GH": "https://github.com/YugaByte/yugabyte-db/issues/10866", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#create-or-alter-conversion-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#create-or-alter-conversion-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -453,7 +631,8 @@ "SqlStatement": "CREATE INDEX idx_name1 ON table_name USING spgist (col1);", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1337", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -463,17 +642,8 @@ "SqlStatement": "CREATE INDEX idx_name3 ON schema_name.table_name USING gin (col1,col2,col3);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gin-indexes-on-multiple-columns-are-not-supported", "Suggestion": "", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10652" - }, - { - "IssueType": "unsupported_features", - "ObjectType": "TABLE", - "ObjectName": "tbl_unlogged", - "Reason": "UNLOGGED tables are not supported yet.", - "SqlStatement": "CREATE UNLOGGED TABLE tbl_unlogged (id int, val text);", - "Suggestion": "Remove UNLOGGED keyword to make it work", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/1129/", - "DocsLink":"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unlogged-table-is-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/10652", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -483,7 +653,8 @@ "SqlStatement": "CREATE VIEW v1 AS SELECT * FROM t1 WHERE a 
\u003c 2\nWITH CHECK OPTION;", "Suggestion": "Use Trigger with INSTEAD OF clause on INSERT/UPDATE on view to get this functionality", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#view-with-check-option-is-not-supported", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/22716" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/22716", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -493,7 +664,8 @@ "SqlStatement": "CREATE VIEW v2 AS SELECT * FROM t1 WHERE a \u003c 2\nWITH LOCAL CHECK OPTION;", "Suggestion": "Use Trigger with INSTEAD OF clause on INSERT/UPDATE on view to get this functionality", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#view-with-check-option-is-not-supported", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/22716" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/22716", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -502,7 +674,8 @@ "Reason": "DROP multiple objects not supported yet.", "SqlStatement": "DROP COLLATION IF EXISTS coll1,coll2,coll3;", "Suggestion": "DROP COLLATION coll1;DROP COLLATION coll2;DROP COLLATION coll3;", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/880" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/880", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -511,7 +684,8 @@ "Reason": "DROP multiple objects not supported yet.", "SqlStatement": "DROP INDEX idx1,idx2,idx3;", "Suggestion": "DROP INDEX idx1;DROP INDEX idx2;DROP INDEX idx3;", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/880" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/880", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -520,7 +694,8 @@ "Reason": "DROP multiple objects not supported yet.", "SqlStatement": "DROP VIEW IF EXISTS view1,view2,view3;", "Suggestion": "DROP VIEW view1;DROP VIEW 
view2;DROP VIEW view3;", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/880" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/880", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -529,7 +704,8 @@ "Reason": "DROP multiple objects not supported yet.", "SqlStatement": "DROP SEQUENCE seq1_tbl,seq2_tbl,seq3_tbl;", "Suggestion": "DROP SEQUENCE seq1_tbl;DROP SEQUENCE seq2_tbl;DROP SEQUENCE seq3_tbl;", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/880" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/880", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -538,7 +714,8 @@ "Reason": "DROP INDEX CONCURRENTLY not supported yet", "SqlStatement": "DROP INDEX CONCURRENTLY sales_quantity_index;", "Suggestion": "", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/22717" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/22717", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -548,7 +725,8 @@ "SqlStatement": "CREATE TRIGGER before_insert_or_delete_row_trigger\nBEFORE INSERT OR DELETE ON public.range_columns_partition_test\nFOR EACH ROW\nEXECUTE FUNCTION handle_insert_or_delete();", "Suggestion": "Create the triggers on individual partitions.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/24830", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#before-row-triggers-on-partitioned-tables" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#before-row-triggers-on-partitioned-tables", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -558,7 +736,8 @@ "SqlStatement": "CREATE TRIGGER transfer_insert\n AFTER INSERT ON transfer\n REFERENCING NEW TABLE AS inserted\n FOR EACH STATEMENT\n EXECUTE FUNCTION check_transfer_balances_to_zero();", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1668", - 
"DocsLink":"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#referencing-clause-for-triggers" + "DocsLink":"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#referencing-clause-for-triggers", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -568,7 +747,8 @@ "SqlStatement": "CREATE CONSTRAINT TRIGGER some_trig\n AFTER DELETE ON xyz_schema.abc\n DEFERRABLE INITIALLY DEFERRED\n FOR EACH ROW EXECUTE PROCEDURE xyz_schema.some_trig();", "Suggestion": "", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#constraint-trigger-is-not-supported", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1709" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1709", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -577,7 +757,8 @@ "Reason": "CREATE ACCESS METHOD is not supported.", "SqlStatement": "CREATE ACCESS METHOD heptree TYPE INDEX HANDLER heptree_handler;", "Suggestion": "", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10693" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/10693", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -586,7 +767,8 @@ "Reason": "REINDEX is not supported.", "SqlStatement": "REINDEX TABLE my_table;", "Suggestion": "", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10267" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/10267", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -596,7 +778,8 @@ "SqlStatement": "CREATE TABLE public.employees4 (\n id integer NOT NULL,\n first_name character varying(50) NOT NULL,\n last_name character varying(50) NOT NULL,\n full_name character varying(101) GENERATED ALWAYS AS ((((first_name)::text || ' '::text) || (last_name)::text)) STORED\n);", "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported", "Suggestion": "Using Triggers to update the generated columns is one way to work around this issue, refer docs link for more details.", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10695" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/10695", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -606,7 +789,8 @@ "SqlStatement": "CREATE TABLE order_details (\n detail_id integer NOT NULL,\n quantity integer,\n price_per_unit numeric,\n amount numeric GENERATED ALWAYS AS (((quantity)::numeric * price_per_unit)) STORED\n);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported", "Suggestion": "Using Triggers to update the generated columns is one way to work around this issue, refer docs link for more details.", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10695" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/10695", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -615,7 +799,8 @@ "Reason": "LIKE clause not supported yet.", "SqlStatement": "CREATE TABLE table_xyz\n (LIKE xyz INCLUDING DEFAULTS INCLUDING CONSTRAINTS);", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -624,7 +809,8 @@ "Reason": "LIKE ALL is not supported yet.", "SqlStatement": "CREATE TABLE table_abc\n (LIKE abc INCLUDING ALL);", "Suggestion": "", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10697" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/10697", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -634,7 +820,8 @@ "SqlStatement": "CREATE TABLE table_1 () INHERITS 
(xyz);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -643,16 +830,184 @@ "Reason": "OIDs are not supported for user tables.", "SqlStatement": "Create table table_test (col1 text, col2 int) with (OIDS = TRUE);", "Suggestion": "", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10273" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/10273", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "INDEX", + "ObjectName": "idx1 ON combined_tbl1", + "Reason": "INDEX on column 'daterange' not yet supported", + "SqlStatement": "CREATE INDEX idx1 on combined_tbl1 (d);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "INDEX", + "ObjectName": "idx2 ON combined_tbl1", + "Reason": "INDEX on column 'tsrange' not yet supported", + "SqlStatement": "CREATE INDEX idx2 on combined_tbl1 (t);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "INDEX", + "ObjectName": "idx3 ON combined_tbl1", + "Reason": "INDEX on column 'tstzrange' not yet supported", + "SqlStatement": "CREATE INDEX idx3 on combined_tbl1 
(tz);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "INDEX", + "ObjectName": "idx4 ON combined_tbl1", + "Reason": "INDEX on column 'numrange' not yet supported", + "SqlStatement": "CREATE INDEX idx4 on combined_tbl1 (n);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "INDEX", + "ObjectName": "idx5 ON combined_tbl1", + "Reason": "INDEX on column 'int4range' not yet supported", + "SqlStatement": "CREATE INDEX idx5 on combined_tbl1 (i4);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "INDEX", + "ObjectName": "idx6 ON combined_tbl1", + "Reason": "INDEX on column 'int8range' not yet supported", + "SqlStatement": "CREATE INDEX idx6 on combined_tbl1 (i8);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": 
"TABLE", + "ObjectName": "combined_tbl, constraint: (combined_tbl_j_key)", + "Reason": "Primary key and Unique constraint on column 'json' not yet supported", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "combined_tbl, constraint: (pk)", + "Reason": "Primary key and Unique constraint on column 'macaddr8' not yet supported", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "combined_tbl, constraint: (combined_tbl_unique)", + "Reason": "Primary key and Unique constraint on column 'bit' not yet supported", + "SqlStatement": "ALTER TABLE combined_tbl\n\t\tADD CONSTRAINT combined_tbl_unique UNIQUE(id, bitt);", + 
"Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "combined_tbl1, constraint: (combined_tbl1_i4_key)", + "Reason": "Primary key and Unique constraint on column 'int4range' not yet supported", + "SqlStatement": "CREATE TABLE combined_tbl1(\n\tid int,\n\tt tsrange,\n\td daterange,\n\ttz tstzrange,\n\tn numrange,\n\ti4 int4range UNIQUE,\n\ti8 int8range,\n\tinym INTERVAL YEAR TO MONTH,\n\tinds INTERVAL DAY TO SECOND(9),\n\tPRIMARY KEY(id, t, n)\n);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "combined_tbl1, constraint: (combined_tbl1_id_t_n_pkey)", + "Reason": "Primary key and Unique constraint on column 'tsrange' not yet supported", + "SqlStatement": "CREATE TABLE combined_tbl1(\n\tid int,\n\tt tsrange,\n\td daterange,\n\ttz tstzrange,\n\tn numrange,\n\ti4 int4range UNIQUE,\n\ti8 int8range,\n\tinym INTERVAL YEAR TO MONTH,\n\tinds INTERVAL DAY TO SECOND(9),\n\tPRIMARY KEY(id, t, n)\n);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "combined_tbl1, 
constraint: (combined_tbl1_id_t_n_pkey)", + "Reason": "Primary key and Unique constraint on column 'numrange' not yet supported", + "SqlStatement": "CREATE TABLE combined_tbl1(\n\tid int,\n\tt tsrange,\n\td daterange,\n\ttz tstzrange,\n\tn numrange,\n\ti4 int4range UNIQUE,\n\ti8 int8range,\n\tinym INTERVAL YEAR TO MONTH,\n\tinds INTERVAL DAY TO SECOND(9),\n\tPRIMARY KEY(id, t, n)\n);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "combined_tbl1, constraint: (combined_tbl1_unique)", + "Reason": "Primary key and Unique constraint on column 'daterange' not yet supported", + "SqlStatement": "ALTER TABLE combined_tbl1\n\t\tADD CONSTRAINT combined_tbl1_unique UNIQUE(id, d);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "INDEX", + "ObjectName": "idx7 ON combined_tbl1", + "Reason": "INDEX on column 'interval' not yet supported", + "SqlStatement": "CREATE INDEX idx7 on combined_tbl1 (inym);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "INDEX", + "ObjectName": "idx8 ON combined_tbl1", + "Reason": 
"INDEX on column 'interval' not yet supported", + "SqlStatement": "CREATE INDEX idx8 on combined_tbl1 (inds);", + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "TABLE", - "ObjectName": "test_interval", - "Reason": "PRIMARY KEY containing column of type 'INTERVAL' not yet supported.", + "ObjectName": "test_interval, constraint: (test_interval_frequency_pkey)", + "Reason": "Primary key and Unique constraint on column 'interval' not yet supported", "SqlStatement": "create table test_interval(\n frequency interval primary key,\n\tcol1 int\n);", - "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1397" + "Suggestion": "Refer to the docs link for the workaround", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -661,7 +1016,8 @@ "Reason": "ALTER TABLE SET SCHEMA not supported yet.", "SqlStatement": "ALTER TABLE oldschema.tbl_name SET SCHEMA newschema;", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/3947" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/3947", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -670,7 +1026,8 @@ "Reason": "CREATE SCHEMA with elements not supported yet.", "SqlStatement": "CREATE SCHEMA hollywood\n CREATE TABLE films (title text, release date, awards text[])\n CREATE VIEW winners AS\n SELECT title, release FROM films WHERE awards IS NOT NULL;", "Suggestion": "", - "GH": 
"https://github.com/YugaByte/yugabyte-db/issues/10865" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/10865", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -679,7 +1036,8 @@ "Reason": "ALTER TABLE ALTER column SET STATISTICS not supported yet.", "SqlStatement": "alter table table_name alter column column_name set statistics 100;", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -688,7 +1046,8 @@ "Reason": "ALTER TABLE ALTER column SET STORAGE not supported yet.", "SqlStatement": "alter table test alter column col set STORAGE EXTERNAL;", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -698,7 +1057,8 @@ "SqlStatement": "alter table test_1 alter column col1 set (attribute_option=value);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", "Suggestion": "Remove it from the exported schema", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/1124" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -708,7 +1068,8 @@ "SqlStatement": "alter table test DISABLE RULE example_rule;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", "Suggestion": "Remove this and the rule 'example_rule' from the exported schema to be not enabled on the table.", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/1124" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", @@ -718,7 +1079,8 @@ "SqlStatement": "ALTER TABLE ONLY public.example ADD CONSTRAINT example_email_key UNIQUE (email) WITH (fillfactor='70');", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", "Suggestion": "Remove the storage parameters from the DDL", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -728,7 +1090,8 @@ "SqlStatement": "alter table abc cluster on xyz;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", "Suggestion": "Remove it from the exported schema.", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -737,8 +1100,9 @@ "Reason": "INDEX on column 'tsvector' not yet supported", "SqlStatement": "CREATE INDEX tsvector_idx ON public.documents (title_tsvector, id);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -747,8 +1111,9 @@ "Reason": "INDEX on column 'tsquery' not yet supported", "SqlStatement": "CREATE INDEX tsquery_idx ON public.ts_query_table (query);", "Suggestion": "Refer to the docs link for the workaround", - 
"GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -757,8 +1122,9 @@ "Reason": "INDEX on column 'citext' not yet supported", "SqlStatement": "CREATE INDEX idx_citext ON public.citext_type USING btree (data);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -767,8 +1133,9 @@ "Reason": "INDEX on column 'inet' not yet supported", "SqlStatement": "CREATE INDEX idx_inet ON public.inet_type USING btree (data);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -777,8 +1144,9 @@ "Reason": "INDEX on column 'jsonb' not yet supported", 
"SqlStatement": "CREATE INDEX idx_json ON public.test_jsonb (data);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -787,8 +1155,9 @@ "Reason": "INDEX on column 'jsonb' not yet supported", "SqlStatement": "CREATE INDEX idx_json2 ON public.test_jsonb ((data2::jsonb));", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -797,8 +1166,9 @@ "Reason": "INDEX on column 'array' not yet supported", "SqlStatement": "create index idx_array on public.documents (list_of_sections);", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null 
}, { "IssueType": "unsupported_features", @@ -807,7 +1177,8 @@ "Reason": "ALTER TABLE SET WITHOUT CLUSTER not supported yet.", "SqlStatement": "alter table test SET WITHOUT CLUSTER;", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -816,7 +1187,8 @@ "Reason": "ALTER INDEX SET not supported yet.", "SqlStatement": "ALTER INDEX abc set TABLESPACE new_tbl;", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -825,7 +1197,8 @@ "Reason": "ALTER TABLE INHERIT not supported yet.", "SqlStatement": "alter table test_3 INHERIT test_2;", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -834,7 +1207,8 @@ "Reason": "ALTER TABLE VALIDATE CONSTRAINT not supported yet.", "SqlStatement": "ALTER TABLE distributors VALIDATE CONSTRAINT distfk;", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -844,7 +1218,8 @@ "SqlStatement": "ALTER TABLE abc\nADD CONSTRAINT cnstr_id\n UNIQUE (id)\nDEFERRABLE;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#deferrable-constraint-on-constraints-other-than-foreign-keys-is-not-supported", "Suggestion": "Remove these constraints from the exported schema and make the neccessary changes to the application to work on target seamlessly", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/1709" + "GH": 
"https://github.com/yugabyte/yugabyte-db/issues/1709", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -854,7 +1229,8 @@ "SqlStatement": "ALTER TABLE ONLY public.users\n ADD CONSTRAINT users_email_key UNIQUE (email) DEFERRABLE;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#deferrable-constraint-on-constraints-other-than-foreign-keys-is-not-supported", "Suggestion": "Remove these constraints from the exported schema and make the neccessary changes to the application to work on target seamlessly", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/1709" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1709", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -863,8 +1239,9 @@ "Reason": "DEFERRABLE constraints not supported yet", "SqlStatement": "create table unique_def_test(id int UNIQUE DEFERRABLE, c1 int);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#deferrable-constraint-on-constraints-other-than-foreign-keys-is-not-supported", - "Suggestion": "Remove these constraints from the exported schema and make the necessary changes to the application before pointing it to target", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/1709" + "Suggestion": "Remove these constraints from the exported schema and make the neccessary changes to the application to work on target seamlessly", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1709", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -874,7 +1251,8 @@ "SqlStatement": "create table unique_def_test1(id int, c1 int, UNIQUE(id) DEFERRABLE INITIALLY DEFERRED);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#deferrable-constraint-on-constraints-other-than-foreign-keys-is-not-supported", "Suggestion": "Remove these constraints from the exported schema and make the neccessary changes to 
the application to work on target seamlessly", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/1709" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1709", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -884,7 +1262,8 @@ "SqlStatement": "CREATE TABLE test_xml_type(id int, data xml);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", "Suggestion": "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. Refer link for more details.", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -894,47 +1273,52 @@ "SqlStatement": "CREATE TABLE test_xid_type(id int, data xid);", "Suggestion": "Functions for this type e.g. txid_current are not supported in YugabyteDB yet", "GH": "https://github.com/yugabyte/yugabyte-db/issues/15638", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xid-functions-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xid-functions-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", "ObjectType": "TABLE", "ObjectName": "combined_tbl", "Reason": "Unsupported datatype - pg_lsn on column - lsn", - "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15)\n);", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 
path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", "ObjectType": "TABLE", "ObjectName": "combined_tbl", "Reason": "Unsupported datatype - txid_snapshot on column - id1", - "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15)\n);", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", "ObjectType": "TABLE", "ObjectName": "public.locations", "Reason": "Unsupported datatype - geometry on column - geom", - "SqlStatement": "CREATE TABLE public.locations (\n id integer NOT NULL,\n name character varying(100),\n geom geometry(Point,4326)\n );", + "SqlStatement": "CREATE TABLE 
public.locations (\n id integer NOT NULL,\n name character varying(100),\n geom geometry(Point,4326),\n array_column TEXT CHECK (array_column IS JSON ARRAY)\n );", "Suggestion": "", "GH": "https://github.com/yugabyte/yugabyte-db/issues/11323", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "FOREIGN TABLE", "ObjectName": "public.locations", "Reason": "Foreign tables require manual intervention.", - "SqlStatement": "CREATE FOREIGN TABLE public.locations ( id integer NOT NULL, name character varying(100), geom geometry(Point,4326) ) SERVER remote_server OPTIONS ( schema_name 'public', table_name 'remote_locations' ); ", + "SqlStatement": "CREATE FOREIGN TABLE public.locations (\n id integer NOT NULL,\n name character varying(100),\n geom geometry(Point,4326)\n ) SERVER remote_server\nOPTIONS (\n schema_name 'public',\n table_name 'remote_locations'\n);", "Suggestion": "SERVER 'remote_server', and USER MAPPING should be created manually on the target to create and use the foreign table", "GH": "https://github.com/yugabyte/yb-voyager/issues/1627", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -944,77 +1328,85 @@ "SqlStatement": "CREATE FOREIGN TABLE public.locations (\n id integer NOT NULL,\n name character varying(100),\n geom geometry(Point,4326)\n ) SERVER remote_server\nOPTIONS (\n schema_name 'public',\n table_name 
'remote_locations'\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yugabyte-db/issues/11323", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "combined_tbl", "Reason": "Unsupported datatype for Live migration - circle on column - ci", - "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15)\n);", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "combined_tbl", "Reason": "Unsupported datatype for Live migration - box on column - b", - "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 
txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15)\n);", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "combined_tbl", "Reason": "Unsupported datatype for Live migration - line on column - l", - "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15)\n);", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + 
"MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "combined_tbl", "Reason": "Unsupported datatype for Live migration - lseg on column - ls", - "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15)\n);", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "combined_tbl", "Reason": "Unsupported datatype for Live migration - point on column - p", - "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15)\n);", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk 
PRIMARY KEY (id, maddr8)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "combined_tbl", "Reason": "Unsupported datatype for Live migration - path on column - p1", - "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15)\n);", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "TABLE", "ObjectName": "combined_tbl", "Reason": "Unsupported datatype for Live migration - polygon on column - p2", - "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn 
pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15)\n);", + "SqlStatement": "create table combined_tbl (\n\tid int,\n\tc cidr,\n\tci circle,\n\tb box,\n\tj json UNIQUE,\n\tl line,\n\tls lseg,\n\tmaddr macaddr,\n\tmaddr8 macaddr8,\n\tp point,\n\tlsn pg_lsn,\n\tp1 path,\n\tp2 polygon,\n\tid1 txid_snapshot,\n\tbitt bit (13),\n\tbittv bit varying(15),\n\tCONSTRAINT pk PRIMARY KEY (id, maddr8)\n);", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1024,7 +1416,8 @@ "SqlStatement": "ALTER TABLE ONLY public.meeting\n ADD CONSTRAINT no_time_overlap EXCLUDE USING gist (room_id WITH =, time_range WITH \u0026\u0026);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported", "Suggestion": "Refer docs link for details on possible workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/3944" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3944", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1034,7 +1427,8 @@ "SqlStatement": "CREATE TABLE \"Test\"(\n\tid int,\n\troom_id int,\n\ttime_range trange,\n\troomid int,\n\ttimerange tsrange,\n\tEXCLUDE USING gist (room_id WITH =, time_range WITH \u0026\u0026),\n\tCONSTRAINT no_time_overlap_constr EXCLUDE USING gist (roomid WITH =, timerange WITH \u0026\u0026)\n);", "Suggestion": "Refer docs link for details on possible workaround", "GH": "https://github.com/yugabyte/yugabyte-db/issues/3944", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1044,17 +1438,8 @@ "SqlStatement": "CREATE TABLE \"Test\"(\n\tid int,\n\troom_id int,\n\ttime_range trange,\n\troomid int,\n\ttimerange tsrange,\n\tEXCLUDE USING gist (room_id WITH =, time_range WITH \u0026\u0026),\n\tCONSTRAINT no_time_overlap_constr EXCLUDE USING gist (roomid WITH =, timerange WITH \u0026\u0026)\n);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported", "Suggestion": "Refer docs link for details on possible workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/3944" - }, - { - "IssueType": "migration_caveats", - "ObjectType": "TABLE", - "ObjectName": "public.range_columns_partition_test", - "Reason": "Adding primary key to a partitioned table is not supported yet.", - "SqlStatement": "ALTER TABLE ONLY public.range_columns_partition_test\n ADD CONSTRAINT range_columns_partition_test_pkey PRIMARY KEY (a, b);", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#adding-primary-key-to-a-partitioned-table-results-in-an-error", - "Suggestion": "", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10074" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3944", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1063,7 +1448,8 @@ "Reason": "Primary key constraints are not supported on foreign tables.", "SqlStatement": "CREATE FOREIGN TABLE tbl_p(\n\tid int PRIMARY KEY\n) SERVER remote_server\nOPTIONS (\n schema_name 'public',\n table_name 'remote_table'\n);", "Suggestion": "", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10698" + "GH": 
"https://github.com/yugabyte/yugabyte-db/issues/10698", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1072,7 +1458,8 @@ "Reason": "COMPOUND TRIGGER not supported in YugabyteDB.", "SqlStatement": "CREATE TRIGGER emp_trig\n\tCOMPOUND INSERT ON emp FOR EACH ROW\n\tEXECUTE PROCEDURE trigger_fct_emp_trig();", "Suggestion": "", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1543" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1543", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1081,7 +1468,8 @@ "Reason": "ALTER TYPE not supported yet.", "SqlStatement": "ALTER TYPE colors ADD VALUE 'orange' AFTER 'red';", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1893" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1893", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1090,7 +1478,8 @@ "Reason": "ALTER TYPE not supported yet.", "SqlStatement": "ALTER TYPE compfoo ADD ATTRIBUTE f3 int;", "Suggestion": "", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/1893" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/1893", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1099,7 +1488,8 @@ "Reason": "AnyData datatype doesn't have a mapping in YugabyteDB", "SqlStatement": "CREATE TABLE anydata_test (\n\tid numeric,\n\tcontent ANYDATA\n) ;", "Suggestion": "Remove the column with AnyData datatype or change it to a relevant supported datatype", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1541" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1541", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1108,7 +1498,8 @@ "Reason": "AnyDataSet datatype doesn't have a mapping in YugabyteDB", "SqlStatement": "CREATE TABLE anydataset_test (\n\tid numeric,\n\tcontent ANYDATASET\n) ;", "Suggestion": "Remove the column with AnyDataSet datatype or change it to a relevant 
supported datatype", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1541" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1541", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1117,7 +1508,8 @@ "Reason": "AnyType datatype doesn't have a mapping in YugabyteDB", "SqlStatement": "CREATE TABLE anytype_test (\n\tid numeric,\n\tcontent ANYTYPE\n) ;", "Suggestion": "Remove the column with AnyType datatype or change it to a relevant supported datatype", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1541" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1541", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1126,7 +1518,8 @@ "Reason": "URIType datatype doesn't have a mapping in YugabyteDB", "SqlStatement": "CREATE TABLE uritype_test (\n\tid numeric,\n\tcontent URITYPE\n) ;", "Suggestion": "Remove the column with URIType datatype or change it to a relevant supported datatype", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1541" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1541", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1135,7 +1528,8 @@ "Reason": "JSON_ARRAYAGG() function is not available in YugabyteDB", "SqlStatement": "CREATE OR REPLACE view test AS (\n select x , JSON_ARRAYAGG(trunc(b, 2) order by t desc) as agg\n FROM test1\n where t = '1DAY' group by x\n );", "Suggestion": "Rename the function to YugabyteDB's equivalent JSON_AGG()", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1542" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1542", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1144,7 +1538,8 @@ "Reason": "JSON_ARRAYAGG() function is not available in YugabyteDB", "SqlStatement": "CREATE OR REPLACE PROCEDURE foo1 (p_id integer) AS $body$\nBEGIN\n create temporary table temp(id int, agg bigint);\n insert into temp(id,agg) select x , JSON_ARRAYAGG(trunc(b, 2) order by t 
desc) as agg FROM test1\n select agg from temp;\nend;\n$body$\nLANGUAGE PLPGSQL\nSECURITY DEFINER\n;", "Suggestion": "Rename the function to YugabyteDB's equivalent JSON_AGG()", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1542" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1542", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1153,17 +1548,19 @@ "Reason": "JSON_ARRAYAGG() function is not available in YugabyteDB", "SqlStatement": "CREATE TRIGGER test\n INSTEAD OF INSERT on test for each ROW\n EXECUTE PROCEDURE JSON_ARRAYAGG(trunc(b, 2) order by t desc);", "Suggestion": "Rename the function to YugabyteDB's equivalent JSON_AGG()", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1542" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1542", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "TABLE", "ObjectName": "test_1", "Reason": "insufficient columns in the PRIMARY KEY constraint definition in CREATE TABLE - (country_code, record_type)", - "SqlStatement": "CREATE TABLE test_1 (\n\tid numeric NOT NULL,\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50),\n\tPRIMARY KEY (id)\n) PARTITION BY LIST (country_code, record_type) ;", + "SqlStatement": "CREATE TABLE test_1 (\n\tid numeric NOT NULL REFERENCES sales_data(sales_id),\n\tcountry_code varchar(3),\n\trecord_type varchar(5),\n\tdescriptions varchar(50),\n\tPRIMARY KEY (id)\n) PARTITION BY LIST (country_code, record_type) ;", "Suggestion": "Add all Partition columns to Primary Key", "GH": "https://github.com/yugabyte/yb-voyager/issues/578", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", @@ -1173,7 +1570,8 @@ "SqlStatement": "CREATE TABLE sales_data (\n sales_id numeric NOT NULL,\n sales_date timestamp,\n sales_amount numeric,\n PRIMARY KEY (sales_id)\n) PARTITION BY RANGE (sales_date) ;", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns", "Suggestion": "Add all Partition columns to Primary Key", - "GH": "https://github.com/yugabyte/yb-voyager/issues/578" + "GH": "https://github.com/yugabyte/yb-voyager/issues/578", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1183,7 +1581,8 @@ "SqlStatement": "CREATE EXTENSION IF NOT EXISTS aws_commons WITH SCHEMA public;", "DocsLink": "https://docs.yugabyte.com/preview/explore/ysql-language-features/pg-extensions/", "Suggestion": "", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1538" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1538", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1193,7 +1592,8 @@ "SqlStatement": "CREATE EXTENSION IF NOT EXISTS plperl WITH SCHEMA pg_catalog;", "DocsLink": "https://docs.yugabyte.com/preview/explore/ysql-language-features/pg-extensions/", "Suggestion": "", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1538" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1538", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1203,7 +1603,8 @@ "SqlStatement": "CREATE EXTENSION IF NOT EXISTS hstore_plperl WITH SCHEMA public;", "DocsLink": "https://docs.yugabyte.com/preview/explore/ysql-language-features/pg-extensions/", "Suggestion": "", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1538" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1538", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1212,17 +1613,19 @@ "Reason": "This FETCH clause might not be supported yet", "SqlStatement": "CREATE OR REPLACE PROCEDURE 
test () AS $body$\nDECLARE\n cur CURSOR FOR SELECT column_name FROM table_name;\n row RECORD;\nBEGIN\n OPEN cur;\n FETCH PRIOR FROM cur INTO row;\n CLOSE cur;\nEND;\n$body$\nLANGUAGE PLPGSQL\nSECURITY DEFINER\n;", "Suggestion": "Please verify the DDL on your YugabyteDB version before proceeding", - "GH": "https://github.com/YugaByte/yugabyte-db/issues/6514" + "GH": "https://github.com/YugaByte/yugabyte-db/issues/6514", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "FOREIGN TABLE", "ObjectName": "tbl_p", "Reason": "Foreign tables require manual intervention.", - "SqlStatement": "CREATE FOREIGN TABLE tbl_p( \tid int PRIMARY KEY ) SERVER remote_server OPTIONS ( schema_name 'public', table_name 'remote_table' ); ", + "SqlStatement": "CREATE FOREIGN TABLE tbl_p(\n\tid int PRIMARY KEY\n) SERVER remote_server\nOPTIONS (\n schema_name 'public',\n table_name 'remote_table'\n);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping", "Suggestion": "SERVER 'remote_server', and USER MAPPING should be created manually on the target to create and use the foreign table", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1627" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1627", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1232,7 +1635,8 @@ "SqlStatement": "CREATE TABLE enum_example.bugs (\n id integer NOT NULL,\n description text,\n status enum_example.bug_status,\n _status enum_example.bug_status GENERATED ALWAYS AS (status) STORED,\n severity enum_example.bug_severity,\n _severity enum_example.bug_severity GENERATED ALWAYS AS (severity) STORED,\n info enum_example.bug_info GENERATED ALWAYS AS (enum_example.make_bug_info(status, severity)) STORED\n);", "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported", "Suggestion": "Using Triggers to update the generated columns is one way to work around this issue, refer docs link for more details.", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/10695" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/10695", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -1242,7 +1646,8 @@ "SqlStatement": "CREATE POLICY P ON tbl1 TO regress_rls_eve, regress_rls_frank USING (true);", "Suggestion": "Users/Grants are not migrated during the schema migration. Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -1252,6 +1657,491 @@ "SqlStatement": "CREATE POLICY p1 ON z1 TO regress_rls_group1 USING (a % 2 = 0);", "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", "Suggestion": "Users/Grants are not migrated during the schema migration. 
Create the Users manually to make the policies work", - "GH": "https://github.com/yugabyte/yb-voyager/issues/1655" + "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "create_and_populate_tables", + "Reason": "Advisory Locks", + "SqlStatement": "SELECT pg_advisory_lock(sender_id);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "create_and_populate_tables", + "Reason": "Advisory Locks", + "SqlStatement": "SELECT pg_advisory_lock(receiver_id);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "create_and_populate_tables", + "Reason": "Advisory Locks", + "SqlStatement": "SELECT pg_advisory_unlock(sender_id);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "create_and_populate_tables", + "Reason": "Advisory Locks", + "SqlStatement": "SELECT pg_advisory_unlock(receiver_id);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "create_and_populate_tables", + "Reason": "Advisory Locks", + "SqlStatement": "SELECT pg_advisory_unlock(sender_id);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "create_and_populate_tables", + "Reason": "Advisory Locks", + "SqlStatement": "SELECT pg_advisory_unlock(receiver_id);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "create_and_populate_tables", + "Reason": "XML Functions", + "SqlStatement": "SELECT id, xpath('/person/name/text()', data) AS name FROM test_xml_type;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "create_and_populate_tables", + "Reason": "System Columns", + "SqlStatement": "SELECT * FROM employees e WHERE e.xmax = (SELECT MAX(xmax) FROM employees WHERE department = e.department);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/24843", + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#system-columns-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "PROCEDURE", + "ObjectName": "add_employee", + "Reason": "Advisory Locks", + "SqlStatement": "SELECT id, first_name FROM employees WHERE pg_try_advisory_lock(300) IS TRUE;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "PROCEDURE", + "ObjectName": "add_employee", + "Reason": "System Columns", + "SqlStatement": "SELECT e.id, e.name,\n ROW_NUMBER() OVER (ORDER BY e.ctid) AS row_num\n FROM employees e;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/24843", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#system-columns-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "PROCEDURE", + "ObjectName": "add_employee", + "Reason": "XML Functions", + "SqlStatement": "SELECT e.id, x.employee_xml\n FROM employees e\n JOIN (\n SELECT xmlelement(name \"employee\", xmlattributes(e.id AS \"id\"), e.name) AS employee_xml\n FROM employees e\n ) x ON x.employee_xml IS NOT NULL\n WHERE xmlexists('//employee[name=\"John Doe\"]' PASSING BY REF x.employee_xml);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "PROCEDURE", + "ObjectName": "add_employee", + "Reason": "Advisory Locks", + 
"SqlStatement": "SELECT e.id,\n CASE\n WHEN e.salary \u003e 100000 THEN pg_advisory_lock(e.id)\n ELSE pg_advisory_unlock(e.id)\n END AS lock_status\n FROM employees e;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "MVIEW", + "ObjectName": "public.sample_data_view", + "Reason": "XML Functions", + "SqlStatement": "CREATE MATERIALIZED VIEW public.sample_data_view AS\n SELECT sample_data.id,\n sample_data.name,\n sample_data.description,\n XMLFOREST(sample_data.name AS name, sample_data.description AS description) AS xml_data,\n pg_try_advisory_lock((sample_data.id)::bigint) AS lock_acquired,\n sample_data.ctid AS row_ctid,\n sample_data.xmin AS xmin_value\n FROM public.sample_data\n WITH NO DATA;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "MVIEW", + "ObjectName": "public.sample_data_view", + "Reason": "Advisory Locks", + "SqlStatement": "CREATE MATERIALIZED VIEW public.sample_data_view AS\n SELECT sample_data.id,\n sample_data.name,\n sample_data.description,\n XMLFOREST(sample_data.name AS name, sample_data.description AS description) AS xml_data,\n pg_try_advisory_lock((sample_data.id)::bigint) AS lock_acquired,\n sample_data.ctid AS row_ctid,\n sample_data.xmin AS xmin_value\n FROM public.sample_data\n WITH NO DATA;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + 
"MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "MVIEW", + "ObjectName": "public.sample_data_view", + "Reason": "System Columns", + "SqlStatement": "CREATE MATERIALIZED VIEW public.sample_data_view AS\n SELECT sample_data.id,\n sample_data.name,\n sample_data.description,\n XMLFOREST(sample_data.name AS name, sample_data.description AS description) AS xml_data,\n pg_try_advisory_lock((sample_data.id)::bigint) AS lock_acquired,\n sample_data.ctid AS row_ctid,\n sample_data.xmin AS xmin_value\n FROM public.sample_data\n WITH NO DATA;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/24843", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#system-columns-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "public.orders_view", + "Reason": "XML Functions", + "SqlStatement": "CREATE VIEW public.orders_view AS\n SELECT orders.order_id,\n orders.customer_name,\n orders.product_name,\n orders.quantity,\n orders.price,\n XMLELEMENT(NAME \"OrderDetails\", XMLELEMENT(NAME \"Customer\", orders.customer_name), XMLELEMENT(NAME \"Product\", orders.product_name), XMLELEMENT(NAME \"Quantity\", orders.quantity), XMLELEMENT(NAME \"TotalPrice\", (orders.price * (orders.quantity)::numeric))) AS order_xml,\n XMLCONCAT(XMLELEMENT(NAME \"Customer\", orders.customer_name), XMLELEMENT(NAME \"Product\", orders.product_name)) AS summary_xml,\n pg_try_advisory_lock((hashtext((orders.customer_name || orders.product_name)))::bigint) AS lock_acquired,\n orders.ctid AS row_ctid,\n orders.xmin AS transaction_id\n FROM public.orders;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + 
"IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "public.orders_view", + "Reason": "Advisory Locks", + "SqlStatement": "CREATE VIEW public.orders_view AS\n SELECT orders.order_id,\n orders.customer_name,\n orders.product_name,\n orders.quantity,\n orders.price,\n XMLELEMENT(NAME \"OrderDetails\", XMLELEMENT(NAME \"Customer\", orders.customer_name), XMLELEMENT(NAME \"Product\", orders.product_name), XMLELEMENT(NAME \"Quantity\", orders.quantity), XMLELEMENT(NAME \"TotalPrice\", (orders.price * (orders.quantity)::numeric))) AS order_xml,\n XMLCONCAT(XMLELEMENT(NAME \"Customer\", orders.customer_name), XMLELEMENT(NAME \"Product\", orders.product_name)) AS summary_xml,\n pg_try_advisory_lock((hashtext((orders.customer_name || orders.product_name)))::bigint) AS lock_acquired,\n orders.ctid AS row_ctid,\n orders.xmin AS transaction_id\n FROM public.orders;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/3642", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "public.orders_view", + "Reason": "System Columns", + "SqlStatement": "CREATE VIEW public.orders_view AS\n SELECT orders.order_id,\n orders.customer_name,\n orders.product_name,\n orders.quantity,\n orders.price,\n XMLELEMENT(NAME \"OrderDetails\", XMLELEMENT(NAME \"Customer\", orders.customer_name), XMLELEMENT(NAME \"Product\", orders.product_name), XMLELEMENT(NAME \"Quantity\", orders.quantity), XMLELEMENT(NAME \"TotalPrice\", (orders.price * (orders.quantity)::numeric))) AS order_xml,\n XMLCONCAT(XMLELEMENT(NAME \"Customer\", orders.customer_name), XMLELEMENT(NAME \"Product\", orders.product_name)) AS summary_xml,\n pg_try_advisory_lock((hashtext((orders.customer_name || orders.product_name)))::bigint) AS lock_acquired,\n orders.ctid AS row_ctid,\n 
orders.xmin AS transaction_id\n FROM public.orders;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/24843", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#system-columns-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "public.get_employeee_salary", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.salary%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "calculate_tax", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.tax_rate%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "log_salary_change", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.salary%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": 
"unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "log_salary_change", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.salary%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "get_employee_details", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.name%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "get_employee_details", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.id%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "list_high_earners", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.name%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": 
"https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "list_high_earners", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.salary%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "copy_high_earners", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.salary%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "PROCEDURE", + "ObjectName": "update_salary", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.salary%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "PROCEDURE", + "ObjectName": "get_employee_details_proc", + "Reason": 
"Referenced type declaration of variables", + "SqlStatement": "employees.name%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "PROCEDURE", + "ObjectName": "get_employee_details_proc", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.id%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "PROCEDURE", + "ObjectName": "get_employee_details_proc", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "employees.salary%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "get_employee_details", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "public.employees.name%TYPE", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_datatypes", + "ObjectType": "TABLE", + "ObjectName": "image", + "Reason": "Unsupported datatype - lo on column - raster", + "SqlStatement": "CREATE TABLE image (title text, raster lo);", + "Suggestion": "Large objects are not yet supported in YugabyteDB, no workaround available currently", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25318", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#large-objects-and-its-functions-are-currently-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "top_employees_view", + "Reason": "FETCH .. WITH TIES", + "SqlStatement": "CREATE VIEW top_employees_view AS SELECT * FROM (\n\t\t\tSELECT * FROM employees\n\t\t\tORDER BY salary DESC\n\t\t\tFETCH FIRST 2 ROWS WITH TIES\n\t\t) AS top_employees;", + "Suggestion": "No workaround available right now", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_datatypes", + "ObjectType": "TABLE", + "ObjectName": "bigint_multirange_table", + "Reason": "Unsupported datatype - int8multirange on column - value_ranges", + "SqlStatement": "CREATE TABLE bigint_multirange_table (\n id integer PRIMARY KEY,\n value_ranges int8multirange\n);", + "Suggestion": "Multirange data type is not yet supported in YugabyteDB, no workaround available currently", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + 
"IssueType": "unsupported_datatypes", + "ObjectType": "TABLE", + "ObjectName": "date_multirange_table", + "Reason": "Unsupported datatype - datemultirange on column - project_dates", + "SqlStatement": "CREATE TABLE date_multirange_table (\n id integer PRIMARY KEY,\n project_dates datemultirange\n);", + "Suggestion": "Multirange data type is not yet supported in YugabyteDB, no workaround available currently", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_datatypes", + "ObjectType": "TABLE", + "ObjectName": "int_multirange_table", + "Reason": "Unsupported datatype - int4multirange on column - value_ranges", + "SqlStatement": "CREATE TABLE int_multirange_table (\n id integer PRIMARY KEY,\n value_ranges int4multirange\n);", + "Suggestion": "Multirange data type is not yet supported in YugabyteDB, no workaround available currently", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_datatypes", + "ObjectType": "TABLE", + "ObjectName": "numeric_multirange_table", + "Reason": "Unsupported datatype - nummultirange on column - price_ranges", + "SqlStatement": "CREATE TABLE numeric_multirange_table (\n id integer PRIMARY KEY,\n price_ranges nummultirange\n);", + "Suggestion": "Multirange data type is not yet supported in YugabyteDB, no workaround available currently", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_datatypes", + "ObjectType": "TABLE", 
+ "ObjectName": "timestamp_multirange_table", + "Reason": "Unsupported datatype - tsmultirange on column - event_times", + "SqlStatement": "CREATE TABLE timestamp_multirange_table (\n id integer PRIMARY KEY,\n event_times tsmultirange\n);", + "Suggestion": "Multirange data type is not yet supported in YugabyteDB, no workaround available currently", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_datatypes", + "ObjectType": "TABLE", + "ObjectName": "timestamptz_multirange_table", + "Reason": "Unsupported datatype - tstzmultirange on column - global_event_times", + "SqlStatement": "CREATE TABLE timestamptz_multirange_table (\n id integer PRIMARY KEY,\n global_event_times tstzmultirange\n);", + "Suggestion": "Multirange data type is not yet supported in YugabyteDB, no workaround available currently", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "users_unique_nulls_not_distinct", + "Reason": "Unique Nulls Not Distinct", + "SqlStatement": "CREATE TABLE users_unique_nulls_not_distinct (\n id SERIAL PRIMARY KEY,\n email TEXT,\n UNIQUE NULLS NOT DISTINCT (email)\n);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "sales_unique_nulls_not_distinct", + "Reason": "Unique Nulls Not Distinct", + "SqlStatement": "CREATE TABLE 
sales_unique_nulls_not_distinct (\n store_id INT,\n product_id INT,\n sale_date DATE,\n UNIQUE NULLS NOT DISTINCT (store_id, product_id, sale_date)\n);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "TABLE", + "ObjectName": "sales_unique_nulls_not_distinct_alter", + "Reason": "Unique Nulls Not Distinct", + "SqlStatement": "ALTER TABLE sales_unique_nulls_not_distinct_alter\n\tADD CONSTRAINT sales_unique_nulls_not_distinct_alter_unique UNIQUE NULLS NOT DISTINCT (store_id, product_id, sale_date);", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "INDEX", + "ObjectName": "users_unique_nulls_not_distinct_index_email ON users_unique_nulls_not_distinct_index", + "Reason": "Unique Nulls Not Distinct", + "SqlStatement": "CREATE UNIQUE INDEX users_unique_nulls_not_distinct_index_email\n ON users_unique_nulls_not_distinct_index (email)\n NULLS NOT DISTINCT;", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25575", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null } ] diff --git a/migtests/tests/analyze-schema/summary.json b/migtests/tests/analyze-schema/summary.json index 18fd6e6061..2a24c33dd6 100644 --- a/migtests/tests/analyze-schema/summary.json +++ b/migtests/tests/analyze-schema/summary.json @@ -8,8 +8,15 @@ { "ObjectType": "SCHEMA", "TotalCount": 1, + "InvalidCount": 1, "ObjectNames": "hollywood" }, + { + "ObjectType": 
"COLLATION", + "TotalCount": 3, + "InvalidCount": 3, + "ObjectNames": "special1, ignore_accents, schema2.upperfirst" + }, { "ObjectType": "EXTENSION", "TotalCount": 7, @@ -20,31 +27,38 @@ { "ObjectType": "TYPE", "TotalCount": 4, + "InvalidCount":1, "ObjectNames": "address_type, non_public.address_type1, non_public.enum_test, enum_test" }, { "ObjectType": "TABLE", - "TotalCount": 48, - "InvalidCount": 33, - "ObjectNames": "test_arr_enum, public.locations, test_udt, combined_tbl, public.ts_query_table, public.documents, public.citext_type, public.inet_type, public.test_jsonb, test_xml_type, test_xid_type, public.range_columns_partition_test_copy, anydata_test, uritype_test, public.foreign_def_test, test_4, enum_example.bugs, table_abc, anydataset_test, unique_def_test1, test_2, table_1, public.range_columns_partition_test, table_xyz, public.users, test_3, test_5, test_7, foreign_def_test2, unique_def_test, sales_data, table_test, test_interval, test_non_pk_multi_column_list, test_9, test_8, order_details, public.employees4, anytype_test, public.meeting, test_table_in_type_file, sales, test_1, \"Test\", foreign_def_test1, salaries2, test_6, public.pr" }, + "TotalCount": 64, + "InvalidCount": 53, + "ObjectNames": "test_table_in_type_file, sales_data, salaries2, sales, test_1, test_2, test_non_pk_multi_column_list, test_3, test_4, test_5, test_6, test_7, test_8, test_9, order_details, public.employees4, enum_example.bugs, table_xyz, table_abc, table_1, table_test, test_interval, public.range_columns_partition_test, public.range_columns_partition_test_copy, anydata_test, anydataset_test, anytype_test, uritype_test, \"Test\", public.meeting, public.pr, public.foreign_def_test, public.users, foreign_def_test1, foreign_def_test2, unique_def_test, unique_def_test1, test_xml_type, test_xid_type, public.test_jsonb, public.inet_type, public.citext_type, public.documents, public.ts_query_table, combined_tbl, combined_tbl1, test_udt, test_arr_enum, public.locations, 
public.xml_data_example, image, public.json_data, employees, bigint_multirange_table, date_multirange_table, int_multirange_table, numeric_multirange_table, timestamp_multirange_table, timestamptz_multirange_table, users_unique_nulls_distinct, users_unique_nulls_not_distinct, sales_unique_nulls_not_distinct, sales_unique_nulls_not_distinct_alter, users_unique_nulls_not_distinct_index" }, { "ObjectType": "INDEX", - "TotalCount": 35, - "InvalidCount": 30, - "ObjectNames": "film_fulltext_idx ON public.film, idx_actor_last_name ON public.actor, idx_name1 ON table_name, idx_name2 ON table_name, idx_name3 ON schema_name.table_name, idx_fileinfo_name_splitted ON public.fileinfo, abc ON public.example, abc ON schema2.example, tsvector_idx ON public.documents, tsquery_idx ON public.ts_query_table, idx_citext ON public.citext_type, idx_inet ON public.inet_type, idx_json ON public.test_jsonb, idx_json2 ON public.test_jsonb, idx_valid ON public.test_jsonb, idx_array ON public.documents, idx1 ON combined_tbl, idx2 ON combined_tbl, idx3 ON combined_tbl, idx4 ON combined_tbl, idx5 ON combined_tbl, idx6 ON combined_tbl, idx7 ON combined_tbl, idx8 ON combined_tbl, idx9 ON combined_tbl, idx10 ON combined_tbl, idx11 ON combined_tbl, idx12 ON combined_tbl, idx13 ON combined_tbl, idx14 ON combined_tbl, idx15 ON combined_tbl, idx_udt ON test_udt, idx_udt1 ON test_udt, idx_enum ON test_udt, \"idx\u0026_enum2\" ON test_udt", + "TotalCount": 44, + "InvalidCount": 40, + "ObjectNames": "film_fulltext_idx ON public.film, idx_actor_last_name ON public.actor, idx_name1 ON table_name, idx_name2 ON table_name, idx_name3 ON schema_name.table_name, idx_fileinfo_name_splitted ON public.fileinfo, abc ON public.example, abc ON schema2.example, tsvector_idx ON public.documents, tsquery_idx ON public.ts_query_table, idx_citext ON public.citext_type, idx_inet ON public.inet_type, idx_json ON public.test_jsonb, idx_json2 ON public.test_jsonb, idx_valid ON public.test_jsonb, idx_array ON public.documents, 
idx1 ON combined_tbl, idx2 ON combined_tbl, idx3 ON combined_tbl, idx4 ON combined_tbl, idx5 ON combined_tbl, idx6 ON combined_tbl, idx7 ON combined_tbl, idx8 ON combined_tbl, idx9 ON combined_tbl, idx10 ON combined_tbl, idx11 ON combined_tbl, idx12 ON combined_tbl, idx13 ON combined_tbl, idx14 ON combined_tbl, idx15 ON combined_tbl, idx1 ON combined_tbl1, idx2 ON combined_tbl1, idx3 ON combined_tbl1, idx4 ON combined_tbl1, idx5 ON combined_tbl1, idx6 ON combined_tbl1, idx7 ON combined_tbl1, idx8 ON combined_tbl1, idx_udt ON test_udt, idx_udt1 ON test_udt, idx_enum ON test_udt, \"idx\u0026_enum2\" ON test_udt, users_unique_nulls_not_distinct_index_email ON users_unique_nulls_not_distinct_index", "Details": "There are some GIN indexes present in the schema, but GIN indexes are partially supported in YugabyteDB as mentioned in (https://github.com/yugabyte/yugabyte-db/issues/7850) so take a look and modify them if not supported." }, + { + "ObjectType": "FUNCTION", + "TotalCount": 7, + "InvalidCount": 7, + "ObjectNames": "create_and_populate_tables, public.get_employeee_salary, get_employee_details, calculate_tax, log_salary_change, list_high_earners, copy_high_earners" + }, { "ObjectType": "PROCEDURE", - "TotalCount": 5, - "InvalidCount": 3, - "ObjectNames": "foo, foo1, sp_createnachabatch, test, test1" + "TotalCount": 8, + "InvalidCount": 6, + "ObjectNames": "foo, foo1, sp_createnachabatch, test, get_employee_details_proc, test1, add_employee, update_salary" }, { "ObjectType": "VIEW", - "TotalCount": 3, - "InvalidCount": 3, - "ObjectNames": "v1, v2, test" + "TotalCount": 7, + "InvalidCount": 7, + "ObjectNames": "public.my_films_view, v1, v2, test, public.orders_view, view_name, top_employees_view" }, { "ObjectType": "TRIGGER", @@ -54,14 +68,14 @@ }, { "ObjectType": "MVIEW", - "TotalCount": 1, - "InvalidCount": 1, - "ObjectNames": "test" + "TotalCount": 2, + "InvalidCount": 2, + "ObjectNames": "test, public.sample_data_view" }, { "ObjectType": "CONVERSION", 
"TotalCount": 1, - "InvalidCount": 1, + "InvalidCount": 0, "ObjectNames": "myconv" }, { @@ -79,7 +93,8 @@ { "ObjectType": "OPERATOR", "TotalCount": 1, + "InvalidCount": 0, "ObjectNames": "\u003c%" } ] -} \ No newline at end of file +} diff --git a/migtests/tests/analyze-schema/validate b/migtests/tests/analyze-schema/validate index daf6faa023..64ee24da7d 100755 --- a/migtests/tests/analyze-schema/validate +++ b/migtests/tests/analyze-schema/validate @@ -45,25 +45,27 @@ def validate_report_summary(report, expected_summary): validate_database_objects_summary(report, expected_summary) def validate_database_objects_summary(report, expected_summary): - key = "DatabaseObjects" - expected_objects = expected_summary.get(key, []) - reported_objects = report['Summary'].get(key, []) + key = "DatabaseObjects" + expected_objects = expected_summary.get(key, []) + reported_objects = report['Summary'].get(key, []) - assert len(expected_objects) == len(reported_objects), "Number of database objects does not match" + assert len(expected_objects) == len(reported_objects), "Number of database objects does not match" - for expected_obj, reported_obj in zip(expected_objects, reported_objects): - print(f"validating database object: {expected_obj['ObjectType']}") - print(f"expected summary field for {key}: {expected_obj}") - print(f"reported summary field for {key}: {reported_obj}") - assert expected_obj["ObjectType"] == reported_obj["ObjectType"], f"Object type mismatch for {expected_obj['ObjectType']}" - assert expected_obj["TotalCount"] == reported_obj["TotalCount"], f"Total count mismatch for {expected_obj['ObjectType']}" - if "Details" in expected_obj and "Details" in reported_obj: - assert expected_obj["Details"] == reported_obj["Details"], f"Details mismatch for {expected_obj['ObjectType']}" + for expected_obj, reported_obj in zip(expected_objects, reported_objects): + print(f"validating database object: {expected_obj['ObjectType']}") + print(f"expected summary field for {key}: 
{expected_obj}") + print(f"reported summary field for {key}: {reported_obj}") + assert expected_obj["InvalidCount"] == reported_obj["InvalidCount"], f"Invalid count mismatch for {expected_obj['ObjectType']}" + assert expected_obj["ObjectType"] == reported_obj["ObjectType"], f"Object type mismatch for {expected_obj['ObjectType']}" + assert expected_obj["TotalCount"] == reported_obj["TotalCount"], f"Total count mismatch for {expected_obj['ObjectType']}" - expected_names = sorted(expected_obj.get("ObjectNames", "").split(", ")) - reported_names = sorted(reported_obj.get("ObjectNames", "").split(", ")) - assert expected_names == reported_names, f"Object names mismatch for {expected_obj['ObjectType']}" + if "Details" in expected_obj and "Details" in reported_obj: + assert expected_obj["Details"] == reported_obj["Details"], f"Details mismatch for {expected_obj['ObjectType']}" + + expected_names = sorted(expected_obj.get("ObjectNames", "").split(", ")) + reported_names = sorted(reported_obj.get("ObjectNames", "").split(", ")) + assert expected_names == reported_names, f"Object names mismatch for {expected_obj['ObjectType']}" def validate_report_issues(report, expected_issues): # FilePath reported in the report can be different depending on the machine diff --git a/migtests/tests/import-file/run-import-file-test b/migtests/tests/import-file/run-import-file-test index 93303f04c3..2a8bf0eeaa 100755 --- a/migtests/tests/import-file/run-import-file-test +++ b/migtests/tests/import-file/run-import-file-test @@ -41,9 +41,11 @@ main() { export TARGET_DB_SCHEMA='non_public' +# Kept --start-clean here to test the command for import data file. 
Can add proper validations for it once --truncate-tables is introduced here + step "Import data file: SMSA.txt -> smsa in a non-public schema" import_data_file --data-dir ${TEST_DIR} --format text --delimiter '\t' \ - --file-table-map "SMSA.txt:smsa" --start-clean true + --file-table-map "SMSA.txt:smsa" --start-clean true #clean up the export dir as we will have public schema from this test which should be on fresh export-dir rm -rf ${EXPORT_DIR} @@ -53,7 +55,7 @@ main() { #Default BATCH_SIZE_BYTES step "Import data file: OneMRows.text -> one_m_rows" import_data_file --data-dir ${TEST_DIR} --format text --delimiter '|' \ - --file-table-map "OneMRows.text:one_m_rows" --start-clean true + --file-table-map "OneMRows.text:one_m_rows" export MAX_BATCH_SIZE_BYTES=345643 #~300KB @@ -276,7 +278,7 @@ main() { if [ "${RUN_LARGE_IMPORT_DATA_FILE_TEST}" = true ] ; then step "Run large sized import data file test" - import_data_file --data-dir "s3://yb-voyager-test-data" --delimiter "\t" --format "text" --file-table-map "accounts_350m_data.sql:accounts_large" --start-clean true --yes + import_data_file --data-dir "s3://yb-voyager-test-data" --delimiter "\t" --format "text" --file-table-map "accounts_350m_data.sql:accounts_large" --yes fi diff --git a/migtests/tests/mysql/basic-live-test/env.sh b/migtests/tests/mysql/basic-live-test/env.sh index dfe82e6660..62ddf80b40 100644 --- a/migtests/tests/mysql/basic-live-test/env.sh +++ b/migtests/tests/mysql/basic-live-test/env.sh @@ -1,2 +1 @@ -export SOURCE_DB_TYPE="mysql" -export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"live_test"} \ No newline at end of file +export SOURCE_DB_TYPE="mysql" \ No newline at end of file diff --git a/migtests/tests/mysql/chinook/Chinook.sql b/migtests/tests/mysql/chinook/Chinook.sql index c761f0d041..f3309ff09b 100644 --- a/migtests/tests/mysql/chinook/Chinook.sql +++ b/migtests/tests/mysql/chinook/Chinook.sql @@ -15829,4 +15829,3 @@ INSERT INTO `PlaylistTrack` (`PlaylistId`, `TrackId`) VALUES (17, 2096); 
INSERT INTO `PlaylistTrack` (`PlaylistId`, `TrackId`) VALUES (17, 3290); INSERT INTO `PlaylistTrack` (`PlaylistId`, `TrackId`) VALUES (18, 597); - diff --git a/migtests/tests/mysql/chinook/env.sh b/migtests/tests/mysql/chinook/env.sh index ff571a1c0e..c640e171cd 100644 --- a/migtests/tests/mysql/chinook/env.sh +++ b/migtests/tests/mysql/chinook/env.sh @@ -1,3 +1,5 @@ export SOURCE_DB_TYPE="mysql" -export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"Chinook"} -export TARGET_DB_SCHEMA="TEST_SAMPLE_CHINOOK" \ No newline at end of file +export SOURCE_DB_NAME="Chinook" +export TARGET_DB_SCHEMA="TEST_SAMPLE_CHINOOK" +export SKIP_DB_CREATION="true" + diff --git a/migtests/tests/mysql/datatypes/env.sh b/migtests/tests/mysql/datatypes/env.sh index 1d4759f090..aedc7cffd6 100644 --- a/migtests/tests/mysql/datatypes/env.sh +++ b/migtests/tests/mysql/datatypes/env.sh @@ -1,2 +1 @@ export SOURCE_DB_TYPE="mysql" -export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"datatypes"} diff --git a/migtests/tests/mysql/sakila/env.sh b/migtests/tests/mysql/sakila/env.sh index 620640ea43..5fa4dc4acb 100644 --- a/migtests/tests/mysql/sakila/env.sh +++ b/migtests/tests/mysql/sakila/env.sh @@ -1,2 +1,3 @@ export SOURCE_DB_TYPE="mysql" export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"sakila"} +export SKIP_DB_CREATION="true" \ No newline at end of file diff --git a/migtests/tests/mysql/sakila/init-db b/migtests/tests/mysql/sakila/init-db index f6eb9c5bd6..b783d909f1 100755 --- a/migtests/tests/mysql/sakila/init-db +++ b/migtests/tests/mysql/sakila/init-db @@ -23,4 +23,4 @@ run_mysql ${SOURCE_DB_NAME} "SOURCE sakila-db/sakila-data.sql;" run_mysql ${SOURCE_DB_NAME} "ALTER TABLE address DROP COLUMN location;" echo "Check source database." 
-run_mysql ${SOURCE_DB_NAME} "SELECT count(*) FROM payment;" +run_mysql ${SOURCE_DB_NAME} "SELECT count(*) FROM payment;" \ No newline at end of file diff --git a/migtests/tests/oracle/assessment-report-test/expectedAssessmentReport.json b/migtests/tests/oracle/assessment-report-test/expectedAssessmentReport.json index 430ba37144..d0e493bd42 100644 --- a/migtests/tests/oracle/assessment-report-test/expectedAssessmentReport.json +++ b/migtests/tests/oracle/assessment-report-test/expectedAssessmentReport.json @@ -192,7 +192,8 @@ "ObjectName": "trg_simple_insert", "SqlStatement": "CREATE TRIGGER trg_simple_insert\n\tCOMPOUND INSERT ON simple_table FOR EACH ROW\n\tEXECUTE PROCEDURE trigger_fct_trg_simple_insert();" } - ] + ], + "MinimumVersionsFixedIn": null }, { "FeatureName": "Unsupported Indexes", @@ -217,7 +218,8 @@ "ObjectName": "Index Name: REV_INDEX, Index Type=NORMAL/REV INDEX", "SqlStatement": "" } - ] + ], + "MinimumVersionsFixedIn": null }, { "FeatureName": "Virtual Columns", @@ -230,7 +232,8 @@ "ObjectName": "GENERATED_COLUMN_TABLE.TOTAL_PRICE", "SqlStatement": "" } - ] + ], + "MinimumVersionsFixedIn": null }, { "FeatureName": "Inherited Types", @@ -239,7 +242,8 @@ "ObjectName": "SIMPLE_CAR_TYPE", "SqlStatement": "" } - ] + ], + "MinimumVersionsFixedIn": null }, { "FeatureName": "Unsupported Partitioning Methods", @@ -252,7 +256,8 @@ "ObjectName": "Table Name: SALES, Partition Method: SYSTEM PARTITION", "SqlStatement": "" } - ] + ], + "MinimumVersionsFixedIn": null } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", @@ -1356,5 +1361,6 @@ "There are some BITMAP indexes present in the schema that will get converted to GIN indexes, but GIN indexes are partially supported in YugabyteDB as mentioned in \u003ca class=\"highlight-link\" href=\"https://github.com/yugabyte/yugabyte-db/issues/7850\"\u003ehttps://github.com/yugabyte/yugabyte-db/issues/7850\u003c/a\u003e so take a look and modify them if 
not supported." ], "MigrationCaveats": null, - "UnsupportedQueryConstructs": null + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/oracle/basic-live-test/env.sh b/migtests/tests/oracle/basic-live-test/env.sh index 722796b6a8..998f6081f6 100644 --- a/migtests/tests/oracle/basic-live-test/env.sh +++ b/migtests/tests/oracle/basic-live-test/env.sh @@ -1,4 +1,2 @@ export SOURCE_DB_TYPE="oracle" -export SOURCE_DB_SCHEMA=${SOURCE_DB_SCHEMA:-"TEST_SCHEMA"} -export TARGET_DB_NAME=${TARGET_DB_NAME:-"live_test"} export TARGET_DB_SCHEMA="test_schema" diff --git a/migtests/tests/oracle/bulk-assessment-test/cleanup-db b/migtests/tests/oracle/bulk-assessment-test/cleanup-db new file mode 100644 index 0000000000..07ebc07a8e --- /dev/null +++ b/migtests/tests/oracle/bulk-assessment-test/cleanup-db @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -e +set -x + +source ${SCRIPTS}/functions.sh + +for schema in TEST_SCHEMA TEST_SCHEMA2; do + export SOURCE_DB_SCHEMA=${schema} + run_sqlplus_as_schema_owner ${SOURCE_DB_NAME} ${TESTS_DIR}/oracle/utils/delete_full_schema +done \ No newline at end of file diff --git a/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild1AssessmentReport.json b/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild1AssessmentReport.json index e9da30ab25..44e835b585 100644 --- a/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild1AssessmentReport.json +++ b/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild1AssessmentReport.json @@ -121,7 +121,8 @@ "ObjectName": "trg_simple_insert", "SqlStatement": "CREATE TRIGGER trg_simple_insert\n\tCOMPOUND INSERT ON simple_table FOR EACH ROW\n\tEXECUTE PROCEDURE trigger_fct_trg_simple_insert();" } - ] + ], + "MinimumVersionsFixedIn": null }, { "FeatureName": "Unsupported Indexes", @@ -146,7 +147,8 @@ "ObjectName": "Index Name: PK_IOT_TABLE, Index Type=IOT - TOP INDEX", "SqlStatement": "" } 
- ] + ], + "MinimumVersionsFixedIn": null }, { "FeatureName": "Virtual Columns", @@ -159,7 +161,8 @@ "ObjectName": "GENERATED_COLUMN_TABLE.TOTAL_PRICE", "SqlStatement": "" } - ] + ], + "MinimumVersionsFixedIn": null }, { "FeatureName": "Inherited Types", @@ -168,7 +171,8 @@ "ObjectName": "SIMPLE_CAR_TYPE", "SqlStatement": "" } - ] + ], + "MinimumVersionsFixedIn": null }, { "FeatureName": "Unsupported Partitioning Methods", @@ -181,7 +185,8 @@ "ObjectName": "Table Name: EMPLOYEES2, Partition Method: REFERENCE PARTITION", "SqlStatement": "" } - ] + ], + "MinimumVersionsFixedIn": null } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", @@ -543,5 +548,6 @@ "There are some BITMAP indexes present in the schema that will get converted to GIN indexes, but GIN indexes are partially supported in YugabyteDB as mentioned in \u003ca class=\"highlight-link\" href=\"https://github.com/yugabyte/yugabyte-db/issues/7850\"\u003ehttps://github.com/yugabyte/yugabyte-db/issues/7850\u003c/a\u003e so take a look and modify them if not supported." 
], "MigrationCaveats": null, - "UnsupportedQueryConstructs": null + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild2AssessmentReport.json b/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild2AssessmentReport.json index 8d5912bf38..8e99cbde6b 100644 --- a/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild2AssessmentReport.json +++ b/migtests/tests/oracle/bulk-assessment-test/expected_reports/expectedChild2AssessmentReport.json @@ -106,28 +106,7 @@ }, "UnsupportedDataTypes": null, "UnsupportedDataTypesDesc": "Data types of the source database that are not supported on the target YugabyteDB.", - "UnsupportedFeatures": [ - { - "FeatureName": "Compound Triggers", - "Objects": [] - }, - { - "FeatureName": "Unsupported Indexes", - "Objects": null - }, - { - "FeatureName": "Virtual Columns", - "Objects": null - }, - { - "FeatureName": "Inherited Types", - "Objects": null - }, - { - "FeatureName": "Unsupported Partitioning Methods", - "Objects": null - } - ], + "UnsupportedFeatures": null, "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ { @@ -877,5 +856,6 @@ "For sharding/colocation recommendations, each partition is treated individually. During the export schema phase, all the partitions of a partitioned table are currently created as colocated by default. \nTo manually modify the schema, please refer: \u003ca class=\"highlight-link\" href=\"https://github.com/yugabyte/yb-voyager/issues/1581\"\u003ehttps://github.com/yugabyte/yb-voyager/issues/1581\u003c/a\u003e." 
], "MigrationCaveats": null, - "UnsupportedQueryConstructs": null + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/oracle/bulk-assessment-test/init-db b/migtests/tests/oracle/bulk-assessment-test/init-db index b8acab5175..0cd3340a0d 100755 --- a/migtests/tests/oracle/bulk-assessment-test/init-db +++ b/migtests/tests/oracle/bulk-assessment-test/init-db @@ -5,21 +5,20 @@ set -x source ${SCRIPTS}/functions.sh -echo "Deleting existing data in source database" -run_sqlplus_as_schema_owner ${SOURCE_DB_NAME} ${TESTS_DIR}/oracle/utils/delete_full_schema +export SOURCE_DB_SCHEMA="TEST_SCHEMA" + +create_source_db ${SOURCE_DB_SCHEMA} + +./cleanup-db echo "Initialising source database 1 & inserting data" run_sqlplus_as_schema_owner ${SOURCE_DB_NAME} ${TEST_DIR}/../assessment-report-test/oracle_assessment_report.sql echo "Initialising source database 2 & inserting data" -cat > create-schema2.sql << EOF - CREATE USER TEST_SCHEMA2 identified by "password"; - GRANT ALL PRIVILEGES TO TEST_SCHEMA2; -EOF - run_sqlplus_as_sys ${SOURCE_DB_NAME} "create-schema2.sql" +export SOURCE_DB_SCHEMA="TEST_SCHEMA2" -export SOURCE_DB_USER_SCHEMA_OWNER="TEST_SCHEMA2" +create_source_db ${SOURCE_DB_SCHEMA} run_sqlplus_as_schema_owner ${SOURCE_DB_NAME} ${TEST_DIR}/../partitions/partition_schema.sql run_sqlplus_as_schema_owner ${SOURCE_DB_NAME} ${TEST_DIR}/../partitions/partition_data.sql diff --git a/migtests/tests/oracle/case-sensitivity-reserved-words/env.sh b/migtests/tests/oracle/case-sensitivity-reserved-words/env.sh index 4f724929c1..429f19fb3a 100644 --- a/migtests/tests/oracle/case-sensitivity-reserved-words/env.sh +++ b/migtests/tests/oracle/case-sensitivity-reserved-words/env.sh @@ -1,4 +1,2 @@ export SOURCE_DB_TYPE="oracle" -export SOURCE_DB_SCHEMA=${SOURCE_DB_SCHEMA:-"TEST_SCHEMA"} -export TARGET_DB_NAME=${TARGET_DB_NAME:-"live_test"} export TARGET_DB_SCHEMA="test_schema2" diff --git a/migtests/tests/oracle/co-db/env.sh 
b/migtests/tests/oracle/co-db/env.sh index f0f431a669..15d6f15d90 100644 --- a/migtests/tests/oracle/co-db/env.sh +++ b/migtests/tests/oracle/co-db/env.sh @@ -1,4 +1,5 @@ export SOURCE_DB_TYPE="oracle" export SOURCE_DB_SCHEMA=${SOURCE_DB_SCHEMA:-"CO"} export TARGET_DB_NAME=${TARGET_DB_NAME:-"co_db_test"} +export SKIP_DB_CREATION="true" diff --git a/migtests/tests/oracle/constraints/constraints_schema_data.sql b/migtests/tests/oracle/constraints/constraints_schema_data.sql index 84cd2ef524..3cc1d1d9ef 100644 --- a/migtests/tests/oracle/constraints/constraints_schema_data.sql +++ b/migtests/tests/oracle/constraints/constraints_schema_data.sql @@ -41,17 +41,18 @@ CREATE TABLE check_test ( ID int GENERATED BY DEFAULT AS IDENTITY, first_name varchar(255) NOT NULL, last_name varchar(255), + middle_name varchar(255) NOT NULL, Age int, CHECK (Age>=18) ); -insert into check_test (first_name, last_name, age) values ('Modestine', 'MacMeeking', 20); -insert into check_test (first_name, last_name, age) values ('Genna', 'Kaysor', 50); -insert into check_test (first_name, last_name, age) values ('Tess', 'Wesker', 56); -insert into check_test (first_name, last_name, age) values ('Magnum', 'Danzelman', 89); -insert into check_test (first_name, last_name, age) values ('Mitzi', 'Pidwell', 34); -insert into check_test (first_name, last_name, age) values ('Milzie', 'Rohlfing', 70); - - +insert into check_test (first_name, middle_name, last_name, age) values ('Modestine', 'null', 'MacMeeking', 20); +insert into check_test (first_name, middle_name, last_name, age) values ('Genna', 'null', 'Kaysor', 50); +insert into check_test (first_name, middle_name, last_name, age) values ('Tess', 'null', 'Wesker', 56); +insert into check_test (first_name, middle_name, last_name, age) values ('Magnum', 'null', 'Danzelman', 89); +insert into check_test (first_name, middle_name, last_name, age) values ('Mitzi', 'null', 'Pidwell', 34); +insert into check_test (first_name, middle_name, last_name, age) 
values ('Milzie', 'null', 'Rohlfing', 70); + +ALTER TABLE check_test ADD CONSTRAINT novalid_con CHECK(middle_name<>'null') NOVALIDATE; drop table default_test; diff --git a/migtests/tests/oracle/constraints/validate b/migtests/tests/oracle/constraints/validate index ee964c8bc7..c5d234cb31 100755 --- a/migtests/tests/oracle/constraints/validate +++ b/migtests/tests/oracle/constraints/validate @@ -30,7 +30,11 @@ QUERIES_CHECK = { 'code': "23505" }, 'CHECK_CONDITION': { - 'query': "insert into public.check_test (id, first_name, last_name, age) values (7, 'Tom', 'Stewan', 15);", + 'query': "insert into public.check_test (id, first_name, middle_name, last_name, age) values (7, 'Tom', 'gfh', 'Stewan', 15);", + 'code': "23514" + }, + 'CHECK_CONDITION_NOT_VALID': { + 'query': "insert into public.check_test (id, first_name, middle_name, last_name, age) values (7, 'Tom', 'null', 'Stewan', 25);", 'code': "23514" }, 'FORIEGN_CHECK': { diff --git a/migtests/tests/oracle/partitions/env.sh b/migtests/tests/oracle/partitions/env.sh index 05489d8a99..515cf7abbe 100644 --- a/migtests/tests/oracle/partitions/env.sh +++ b/migtests/tests/oracle/partitions/env.sh @@ -1,4 +1,2 @@ export SOURCE_DB_TYPE="oracle" -export SOURCE_DB_SCHEMA=${SOURCE_DB_SCHEMA:-"TEST_SCHEMA"} -export TARGET_DB_NAME=${TARGET_DB_NAME:-"partition_test"} diff --git a/migtests/tests/oracle/sequences/env.sh b/migtests/tests/oracle/sequences/env.sh index ef3b4aa439..0357fe1d0e 100644 --- a/migtests/tests/oracle/sequences/env.sh +++ b/migtests/tests/oracle/sequences/env.sh @@ -1,3 +1 @@ export SOURCE_DB_TYPE="oracle" -export SOURCE_DB_SCHEMA=${SOURCE_DB_SCHEMA:-"TEST_SCHEMA"} -export TARGET_DB_NAME=${TARGET_DB_NAME:-"oracle_sequences_test"} diff --git a/migtests/tests/oracle/sequences/sequence_schema.sql b/migtests/tests/oracle/sequences/sequence_schema.sql index f3c34ea7b8..310edc2501 100644 --- a/migtests/tests/oracle/sequences/sequence_schema.sql +++ b/migtests/tests/oracle/sequences/sequence_schema.sql @@ -1,70 
+1,70 @@ drop table identity_demo_generated_always; CREATE TABLE identity_demo_generated_always ( - id NUMBER GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + id NUMBER GENERATED ALWAYS AS IDENTITY NOCACHE PRIMARY KEY, description VARCHAR2(100) NOT NULL ); drop table identity_demo_generated_by_def; CREATE TABLE identity_demo_generated_by_def ( - id NUMBER GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, + id NUMBER GENERATED BY DEFAULT AS IDENTITY NOCACHE PRIMARY KEY, description VARCHAR2(100) not null ); DROP TABLE identity_demo_with_null; CREATE TABLE identity_demo_with_null ( - id NUMBER GENERATED BY DEFAULT ON NULL AS IDENTITY PRIMARY KEY, + id NUMBER GENERATED BY DEFAULT ON NULL AS IDENTITY NOCACHE PRIMARY KEY, description VARCHAR2(100) not null ); drop table identity_demo_generated_always_start_with; CREATE TABLE identity_demo_generated_always_start_with ( - id NUMBER GENERATED ALWAYS AS IDENTITY start with 101 PRIMARY KEY, + id NUMBER GENERATED ALWAYS AS IDENTITY NOCACHE start with 101 PRIMARY KEY, description VARCHAR2(100) NOT NULL ); drop table identity_demo_generated_by_def_start_with; CREATE TABLE identity_demo_generated_by_def_start_with ( - id NUMBER GENERATED BY DEFAULT AS IDENTITY start with 101 PRIMARY KEY, + id NUMBER GENERATED BY DEFAULT AS IDENTITY NOCACHE start with 101 PRIMARY KEY, description VARCHAR2(100) not null ); drop table identity_demo_generated_by_def_inc_by; CREATE TABLE identity_demo_generated_by_def_inc_by ( - id NUMBER GENERATED BY DEFAULT AS IDENTITY increment by 101 PRIMARY KEY, + id NUMBER GENERATED BY DEFAULT AS IDENTITY NOCACHE increment by 101 PRIMARY KEY, description VARCHAR2(100) not null ); drop table identity_demo_generated_by_def_st_with_inc_by; CREATE TABLE identity_demo_generated_by_def_st_with_inc_by ( - id NUMBER GENERATED BY DEFAULT AS IDENTITY start with 5 increment by 101 PRIMARY KEY, + id NUMBER GENERATED BY DEFAULT AS IDENTITY NOCACHE start with 5 increment by 101 PRIMARY KEY, description VARCHAR2(100) not null ); drop 
table empty_identity_always; CREATE TABLE empty_identity_always ( - id NUMBER GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + id NUMBER GENERATED ALWAYS AS IDENTITY NOCACHE PRIMARY KEY, description VARCHAR2(100) NOT NULL ); drop table empty_identity_def; CREATE TABLE empty_identity_def ( - id NUMBER GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, + id NUMBER GENERATED BY DEFAULT AS IDENTITY NOCACHE PRIMARY KEY, description VARCHAR2(100) not null ); - drop table "Case_Sensitive_always"; +drop table "Case_Sensitive_always"; CREATE TABLE "Case_Sensitive_always" ( - id NUMBER GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + id NUMBER GENERATED ALWAYS AS IDENTITY NOCACHE PRIMARY KEY, description VARCHAR2(100) not null ); diff --git a/migtests/tests/oracle/sequences/validate b/migtests/tests/oracle/sequences/validate index 3abd78a3b4..8e07b1e988 100755 --- a/migtests/tests/oracle/sequences/validate +++ b/migtests/tests/oracle/sequences/validate @@ -39,14 +39,14 @@ EXPECTED_TABLE_SUM = { } EXPECTED_TABLE_SUM_AFTER_INSERT = { - 'identity_demo_generated_by_def_inc_by': 2227, - 'identity_demo_generated_always': 25, - 'identity_demo_generated_by_def': 30, - 'identity_demo_generated_always_start_with': 223, - 'identity_demo_generated_by_def_st_with_inc_by': 2241, - 'identity_demo_generated_by_def_start_with': 327, - 'identity_demo_with_null': 25, - 'case_sensitive_always': 25 + 'identity_demo_generated_by_def_inc_by': 409, + 'identity_demo_generated_always': 7, + 'identity_demo_generated_by_def': 11, + 'identity_demo_generated_always_start_with': 204, + 'identity_demo_generated_by_def_st_with_inc_by': 423, + 'identity_demo_generated_by_def_start_with': 309, + 'identity_demo_with_null': 7, + 'case_sensitive_always': 7 } if os.environ.get('BETA_FAST_DATA_EXPORT') == '1': diff --git a/migtests/tests/oracle/sequences/validateAfterChanges b/migtests/tests/oracle/sequences/validateAfterChanges index cc6c2705e3..95f0832da3 100755 --- a/migtests/tests/oracle/sequences/validateAfterChanges +++ 
b/migtests/tests/oracle/sequences/validateAfterChanges @@ -166,14 +166,14 @@ def migration_completed_checks_yb(): def migration_completed_checks_ff(): print("Running tests on Oracle source replica") global db_schema - db_schema = os.environ.get("SOURCE_REPLICA_DB_SCHEMA") + db_schema = os.environ.get("SOURCE_REPLICA_DB_SCHEMA").upper() change_expected_values_ff_fb() oracle.run_checks(migration_completed_checks, db_type="source_replica") def migration_completed_checks_fb(): print("Running tests on Oracle source") global db_schema - db_schema = os.environ.get("SOURCE_DB_SCHEMA") + db_schema = os.environ.get("SOURCE_DB_SCHEMA").upper() change_expected_values_ff_fb() oracle.run_checks(migration_completed_checks, db_type="source") diff --git a/migtests/tests/oracle/unique-key-conflicts-test/env.sh b/migtests/tests/oracle/unique-key-conflicts-test/env.sh index 722796b6a8..998f6081f6 100644 --- a/migtests/tests/oracle/unique-key-conflicts-test/env.sh +++ b/migtests/tests/oracle/unique-key-conflicts-test/env.sh @@ -1,4 +1,2 @@ export SOURCE_DB_TYPE="oracle" -export SOURCE_DB_SCHEMA=${SOURCE_DB_SCHEMA:-"TEST_SCHEMA"} -export TARGET_DB_NAME=${TARGET_DB_NAME:-"live_test"} export TARGET_DB_SCHEMA="test_schema" diff --git a/migtests/tests/pg/adventureworks/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/adventureworks/expected_files/expectedAssessmentReport.json index 3525de0baf..54dc757994 100755 --- a/migtests/tests/pg/adventureworks/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/adventureworks/expected_files/expectedAssessmentReport.json @@ -46,7 +46,7 @@ { "ObjectType": "TABLE", "TotalCount": 68, - "InvalidCount": 5, + "InvalidCount": 68, "ObjectNames": "humanresources.department, humanresources.employee, humanresources.employeedepartmenthistory, humanresources.employeepayhistory, humanresources.jobcandidate, humanresources.shift, person.address, person.businessentityaddress, person.countryregion, person.emailaddress, person.person, 
person.personphone, person.phonenumbertype, person.stateprovince, person.addresstype, person.businessentity, person.businessentitycontact, person.contacttype, person.password, production.billofmaterials, production.culture, production.document, production.illustration, production.location, production.product, production.productcategory, production.productcosthistory, production.productdescription, production.productdocument, production.productinventory, production.productlistpricehistory, production.productmodel, production.productmodelillustration, production.productmodelproductdescriptionculture, production.productphoto, production.productproductphoto, production.productreview, production.productsubcategory, production.scrapreason, production.transactionhistory, production.transactionhistoryarchive, production.unitmeasure, production.workorder, production.workorderrouting, purchasing.purchaseorderdetail, purchasing.purchaseorderheader, purchasing.productvendor, purchasing.shipmethod, purchasing.vendor, sales.customer, sales.creditcard, sales.currencyrate, sales.countryregioncurrency, sales.currency, sales.personcreditcard, sales.store, sales.shoppingcartitem, sales.specialoffer, sales.salesorderdetail, sales.salesorderheader, sales.salesorderheadersalesreason, sales.specialofferproduct, sales.salesperson, sales.salespersonquotahistory, sales.salesreason, sales.salesterritory, sales.salesterritoryhistory, sales.salestaxrate" }, { @@ -58,7 +58,7 @@ { "ObjectType": "VIEW", "TotalCount": 87, - "InvalidCount": 0, + "InvalidCount": 8, "ObjectNames": "hr.d, hr.e, hr.edh, hr.eph, hr.jc, hr.s, humanresources.vemployee, humanresources.vemployeedepartment, humanresources.vemployeedepartmenthistory, humanresources.vjobcandidate, humanresources.vjobcandidateeducation, humanresources.vjobcandidateemployment, pe.a, pe.at, pe.be, pe.bea, pe.bec, pe.cr, pe.ct, pe.e, pe.p, pe.pa, pe.pnt, pe.pp, pe.sp, person.vadditionalcontactinfo, pr.bom, pr.c, pr.d, pr.i, pr.l, pr.p, pr.pc, 
pr.pch, pr.pd, pr.pdoc, pr.pi, pr.plph, pr.pm, pr.pmi, pr.pmpdc, pr.pp, pr.ppp, pr.pr, pr.psc, pr.sr, pr.th, pr.tha, pr.um, pr.w, pr.wr, production.vproductmodelcatalogdescription, production.vproductmodelinstructions, pu.pod, pu.poh, pu.pv, pu.sm, pu.v, purchasing.vvendorwithaddresses, purchasing.vvendorwithcontacts, sa.c, sa.cc, sa.cr, sa.crc, sa.cu, sa.pcc, sa.s, sa.sci, sa.so, sa.sod, sa.soh, sa.sohsr, sa.sop, sa.sp, sa.spqh, sa.sr, sa.st, sa.sth, sa.tr, sales.vindividualcustomer, sales.vpersondemographics, sales.vsalesperson, sales.vsalespersonsalesbyfiscalyears, sales.vsalespersonsalesbyfiscalyearsdata, sales.vstorewithaddresses, sales.vstorewithcontacts, sales.vstorewithdemographics" }, { @@ -309,54 +309,6 @@ ], "UnsupportedDataTypesDesc": "Data types of the source database that are not supported on the target YugabyteDB.", "UnsupportedFeatures": [ - { - "FeatureName": "GIST indexes", - "Objects": [] - }, - { - "FeatureName": "BRIN indexes", - "Objects": [] - }, - { - "FeatureName": "SPGIST indexes", - "Objects": [] - }, - { - "FeatureName": "Constraint triggers", - "Objects": [] - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, - { - "FeatureName": "BEFORE ROW triggers on Partitioned tables", - "Objects": [] - }, - { - "FeatureName": "Inherited tables", - "Objects": [] - }, - { - "FeatureName": "Tables with stored generated columns", - "Objects": [] - }, - { - "FeatureName": "Conversion objects", - "Objects": [] - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - "FeatureName": "Disabling rule on table", - "Objects": [] - }, { "FeatureName": "Clustering table on index", "Objects": [ @@ -633,35 +585,47 @@ "SqlStatement": "ALTER TABLE production.vproductanddescription CLUSTER ON ix_vproductanddescription;" } ], - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" - }, - { - "FeatureName": "Storage parameters in DDLs", - "Objects": [] - }, - { - "FeatureName": "Extensions", - "Objects": [] - }, - { - "FeatureName": "Exclusion constraints", - "Objects": [] - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null + }, { - "FeatureName": "Deferrable constraints", - "Objects": [] - }, - { - "FeatureName": "View with check option", - "Objects": [] - }, - { - "FeatureName": "Index on complex datatypes", - "Objects": [] - }, - { - "FeatureName": "Unlogged tables", - "Objects": [] + "FeatureName": "XML Functions", + "Objects": [ + { + "ObjectName": "humanresources.vjobcandidate", + "SqlStatement": "CREATE VIEW humanresources.vjobcandidate AS\n SELECT jobcandidateid,\n businessentityid,\n ((xpath('/n:Resume/n:Name/n:Name.Prefix/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.Prefix\",\n ((xpath('/n:Resume/n:Name/n:Name.First/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.First\",\n ((xpath('/n:Resume/n:Name/n:Name.Middle/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.Middle\",\n ((xpath('/n:Resume/n:Name/n:Name.Last/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.Last\",\n ((xpath('/n:Resume/n:Name/n:Name.Suffix/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.Suffix\",\n 
((xpath('/n:Resume/n:Skills/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying AS \"Skills\",\n ((xpath('n:Address/n:Addr.Type/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Addr.Type\",\n ((xpath('n:Address/n:Addr.Location/n:Location/n:Loc.CountryRegion/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(100) AS \"Addr.Loc.CountryRegion\",\n ((xpath('n:Address/n:Addr.Location/n:Location/n:Loc.State/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(100) AS \"Addr.Loc.State\",\n ((xpath('n:Address/n:Addr.Location/n:Location/n:Loc.City/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(100) AS \"Addr.Loc.City\",\n ((xpath('n:Address/n:Addr.PostalCode/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(20) AS \"Addr.PostalCode\",\n ((xpath('/n:Resume/n:EMail/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying AS \"EMail\",\n ((xpath('/n:Resume/n:WebSite/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying AS \"WebSite\",\n modifieddate\n FROM humanresources.jobcandidate;" + }, + { + "ObjectName": "humanresources.vjobcandidateeducation", + "SqlStatement": "CREATE VIEW humanresources.vjobcandidateeducation AS\n SELECT jobcandidateid,\n ((xpath('/root/ns:Education/ns:Edu.Level/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(50) AS \"Edu.Level\",\n 
(((xpath('/root/ns:Education/ns:Edu.StartDate/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(20))::date AS \"Edu.StartDate\",\n (((xpath('/root/ns:Education/ns:Edu.EndDate/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(20))::date AS \"Edu.EndDate\",\n ((xpath('/root/ns:Education/ns:Edu.Degree/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(50) AS \"Edu.Degree\",\n ((xpath('/root/ns:Education/ns:Edu.Major/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(50) AS \"Edu.Major\",\n ((xpath('/root/ns:Education/ns:Edu.Minor/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(50) AS \"Edu.Minor\",\n ((xpath('/root/ns:Education/ns:Edu.GPA/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(5) AS \"Edu.GPA\",\n ((xpath('/root/ns:Education/ns:Edu.GPAScale/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(5) AS \"Edu.GPAScale\",\n ((xpath('/root/ns:Education/ns:Edu.School/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(100) AS \"Edu.School\",\n ((xpath('/root/ns:Education/ns:Edu.Location/ns:Location/ns:Loc.CountryRegion/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(100) AS \"Edu.Loc.CountryRegion\",\n ((xpath('/root/ns:Education/ns:Edu.Location/ns:Location/ns:Loc.State/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(100) AS \"Edu.Loc.State\",\n ((xpath('/root/ns:Education/ns:Edu.Location/ns:Location/ns:Loc.City/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(100) AS \"Edu.Loc.City\"\n FROM ( SELECT unnesting.jobcandidateid,\n ((('\u003croot xmlns:ns=\"http://adventureworks.com\"\u003e'::text || ((unnesting.education)::character varying)::text) || 
'\u003c/root\u003e'::text))::xml AS doc\n FROM ( SELECT jobcandidate.jobcandidateid,\n unnest(xpath('/ns:Resume/ns:Education'::text, jobcandidate.resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])) AS education\n FROM humanresources.jobcandidate) unnesting) jc;" + }, + { + "ObjectName": "humanresources.vjobcandidateemployment", + "SqlStatement": "CREATE VIEW humanresources.vjobcandidateemployment AS\n SELECT jobcandidateid,\n ((unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.StartDate/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying(20))::date AS \"Emp.StartDate\",\n ((unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.EndDate/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying(20))::date AS \"Emp.EndDate\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.OrgName/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying(100) AS \"Emp.OrgName\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.JobTitle/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying(100) AS \"Emp.JobTitle\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.Responsibility/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.Responsibility\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.FunctionCategory/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.FunctionCategory\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.IndustryCategory/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS 
\"Emp.IndustryCategory\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.Location/ns:Location/ns:Loc.CountryRegion/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.Loc.CountryRegion\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.Location/ns:Location/ns:Loc.State/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.Loc.State\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.Location/ns:Location/ns:Loc.City/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.Loc.City\"\n FROM humanresources.jobcandidate;" + }, + { + "ObjectName": "person.vadditionalcontactinfo", + "SqlStatement": "CREATE VIEW person.vadditionalcontactinfo AS\n SELECT p.businessentityid,\n p.firstname,\n p.middlename,\n p.lastname,\n (xpath('(act:telephoneNumber)[1]/act:number/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS telephonenumber,\n btrim((((xpath('(act:telephoneNumber)[1]/act:SpecialInstructions/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1])::character varying)::text) AS telephonespecialinstructions,\n (xpath('(act:homePostalAddress)[1]/act:Street/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS street,\n (xpath('(act:homePostalAddress)[1]/act:City/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS city,\n (xpath('(act:homePostalAddress)[1]/act:StateProvince/text()'::text, additional.node, 
'{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS stateprovince,\n (xpath('(act:homePostalAddress)[1]/act:PostalCode/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS postalcode,\n (xpath('(act:homePostalAddress)[1]/act:CountryRegion/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS countryregion,\n (xpath('(act:homePostalAddress)[1]/act:SpecialInstructions/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS homeaddressspecialinstructions,\n (xpath('(act:eMail)[1]/act:eMailAddress/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS emailaddress,\n btrim((((xpath('(act:eMail)[1]/act:SpecialInstructions/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1])::character varying)::text) AS emailspecialinstructions,\n (xpath('((act:eMail)[1]/act:SpecialInstructions/act:telephoneNumber)[1]/act:number/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS emailtelephonenumber,\n p.rowguid,\n p.modifieddate\n FROM (person.person p\n LEFT JOIN ( SELECT person.businessentityid,\n unnest(xpath('/ci:AdditionalContactInfo'::text, person.additionalcontactinfo, '{{ci,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactInfo}}'::text[])) AS node\n FROM person.person\n WHERE (person.additionalcontactinfo IS NOT NULL)) additional ON ((p.businessentityid = additional.businessentityid)));" + }, + { + "ObjectName": "production.vproductmodelcatalogdescription", + "SqlStatement": "CREATE VIEW production.vproductmodelcatalogdescription AS\n 
SELECT productmodelid,\n name,\n ((xpath('/p1:ProductDescription/p1:Summary/html:p/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{html,http://www.w3.org/1999/xhtml}}'::text[]))[1])::character varying AS \"Summary\",\n ((xpath('/p1:ProductDescription/p1:Manufacturer/p1:Name/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying AS manufacturer,\n ((xpath('/p1:ProductDescription/p1:Manufacturer/p1:Copyright/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(30) AS copyright,\n ((xpath('/p1:ProductDescription/p1:Manufacturer/p1:ProductURL/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS producturl,\n ((xpath('/p1:ProductDescription/p1:Features/wm:Warranty/wm:WarrantyPeriod/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wm,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelWarrAndMain}}'::text[]))[1])::character varying(256) AS warrantyperiod,\n ((xpath('/p1:ProductDescription/p1:Features/wm:Warranty/wm:Description/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wm,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelWarrAndMain}}'::text[]))[1])::character varying(256) AS warrantydescription,\n ((xpath('/p1:ProductDescription/p1:Features/wm:Maintenance/wm:NoOfYears/text()'::text, catalogdescription, 
'{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wm,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelWarrAndMain}}'::text[]))[1])::character varying(256) AS noofyears,\n ((xpath('/p1:ProductDescription/p1:Features/wm:Maintenance/wm:Description/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wm,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelWarrAndMain}}'::text[]))[1])::character varying(256) AS maintenancedescription,\n ((xpath('/p1:ProductDescription/p1:Features/wf:wheel/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying(256) AS wheel,\n ((xpath('/p1:ProductDescription/p1:Features/wf:saddle/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying(256) AS saddle,\n ((xpath('/p1:ProductDescription/p1:Features/wf:pedal/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying(256) AS pedal,\n ((xpath('/p1:ProductDescription/p1:Features/wf:BikeFrame/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying AS bikeframe,\n ((xpath('/p1:ProductDescription/p1:Features/wf:crankset/text()'::text, catalogdescription, 
'{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying(256) AS crankset,\n ((xpath('/p1:ProductDescription/p1:Picture/p1:Angle/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS pictureangle,\n ((xpath('/p1:ProductDescription/p1:Picture/p1:Size/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS picturesize,\n ((xpath('/p1:ProductDescription/p1:Picture/p1:ProductPhotoID/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS productphotoid,\n ((xpath('/p1:ProductDescription/p1:Specifications/Material/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS material,\n ((xpath('/p1:ProductDescription/p1:Specifications/Color/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS color,\n ((xpath('/p1:ProductDescription/p1:Specifications/ProductLine/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS productline,\n ((xpath('/p1:ProductDescription/p1:Specifications/Style/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS style,\n ((xpath('/p1:ProductDescription/p1:Specifications/RiderExperience/text()'::text, 
catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(1024) AS riderexperience,\n rowguid,\n modifieddate\n FROM production.productmodel\n WHERE (catalogdescription IS NOT NULL);" + }, + { + "ObjectName": "production.vproductmodelinstructions", + "SqlStatement": "CREATE VIEW production.vproductmodelinstructions AS\n SELECT productmodelid,\n name,\n ((xpath('/ns:root/text()'::text, instructions, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelManuInstructions}}'::text[]))[1])::character varying AS instructions,\n (((xpath('@LocationID'::text, mfginstructions))[1])::character varying)::integer AS \"LocationID\",\n (((xpath('@SetupHours'::text, mfginstructions))[1])::character varying)::numeric(9,4) AS \"SetupHours\",\n (((xpath('@MachineHours'::text, mfginstructions))[1])::character varying)::numeric(9,4) AS \"MachineHours\",\n (((xpath('@LaborHours'::text, mfginstructions))[1])::character varying)::numeric(9,4) AS \"LaborHours\",\n (((xpath('@LotSize'::text, mfginstructions))[1])::character varying)::integer AS \"LotSize\",\n ((xpath('/step/text()'::text, step))[1])::character varying(1024) AS \"Step\",\n rowguid,\n modifieddate\n FROM ( SELECT locations.productmodelid,\n locations.name,\n locations.rowguid,\n locations.modifieddate,\n locations.instructions,\n locations.mfginstructions,\n unnest(xpath('step'::text, locations.mfginstructions)) AS step\n FROM ( SELECT productmodel.productmodelid,\n productmodel.name,\n productmodel.rowguid,\n productmodel.modifieddate,\n productmodel.instructions,\n unnest(xpath('/ns:root/ns:Location'::text, productmodel.instructions, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelManuInstructions}}'::text[])) AS mfginstructions\n FROM production.productmodel) locations) pm;" + }, + { + "ObjectName": "sales.vpersondemographics", + "SqlStatement": "CREATE VIEW 
sales.vpersondemographics AS\n SELECT businessentityid,\n (((xpath('n:TotalPurchaseYTD/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::money AS totalpurchaseytd,\n (((xpath('n:DateFirstPurchase/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::date AS datefirstpurchase,\n (((xpath('n:BirthDate/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::date AS birthdate,\n ((xpath('n:MaritalStatus/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(1) AS maritalstatus,\n ((xpath('n:YearlyIncome/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(30) AS yearlyincome,\n ((xpath('n:Gender/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(1) AS gender,\n (((xpath('n:TotalChildren/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::integer AS totalchildren,\n (((xpath('n:NumberChildrenAtHome/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::integer AS numberchildrenathome,\n ((xpath('n:Education/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(30) AS education,\n ((xpath('n:Occupation/text()'::text, demographics, 
'{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(30) AS occupation,\n (((xpath('n:HomeOwnerFlag/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::boolean AS homeownerflag,\n (((xpath('n:NumberCarsOwned/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::integer AS numbercarsowned\n FROM person.person\n WHERE (demographics IS NOT NULL);" + }, + { + "ObjectName": "sales.vstorewithdemographics", + "SqlStatement": "CREATE VIEW sales.vstorewithdemographics AS\n SELECT businessentityid,\n name,\n ((unnest(xpath('/ns:StoreSurvey/ns:AnnualSales/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::money AS \"AnnualSales\",\n ((unnest(xpath('/ns:StoreSurvey/ns:AnnualRevenue/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::money AS \"AnnualRevenue\",\n (unnest(xpath('/ns:StoreSurvey/ns:BankName/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(50) AS \"BankName\",\n (unnest(xpath('/ns:StoreSurvey/ns:BusinessType/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(5) AS \"BusinessType\",\n ((unnest(xpath('/ns:StoreSurvey/ns:YearOpened/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::integer AS \"YearOpened\",\n (unnest(xpath('/ns:StoreSurvey/ns:Specialty/text()'::text, demographics, 
'{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(50) AS \"Specialty\",\n ((unnest(xpath('/ns:StoreSurvey/ns:SquareFeet/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::integer AS \"SquareFeet\",\n (unnest(xpath('/ns:StoreSurvey/ns:Brands/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(30) AS \"Brands\",\n (unnest(xpath('/ns:StoreSurvey/ns:Internet/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(30) AS \"Internet\",\n ((unnest(xpath('/ns:StoreSurvey/ns:NumberEmployees/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::integer AS \"NumberEmployees\"\n FROM sales.store;" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", @@ -1690,22 +1654,7 @@ } ], "Notes": null, - "MigrationCaveats": [ - { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [], - "FeatureDescription": "After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." - }, - { - "FeatureName": "Foreign tables", - "Objects": [], - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." 
- }, - { - "FeatureName": "Policies", - "Objects": [], - "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema." - } - ], - "UnsupportedQueryConstructs": null + "MigrationCaveats": null, + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/pg/adventureworks/expected_files/expected_schema_analysis_report.json b/migtests/tests/pg/adventureworks/expected_files/expected_schema_analysis_report.json index 78ea5ad73b..369f9122d9 100755 --- a/migtests/tests/pg/adventureworks/expected_files/expected_schema_analysis_report.json +++ b/migtests/tests/pg/adventureworks/expected_files/expected_schema_analysis_report.json @@ -1,6 +1,5 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "HIGH", "Summary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "adventureworks", @@ -46,8 +45,7 @@ { "ObjectType": "TABLE", "TotalCount": 68, - "InvalidCount": 0, - "InvalidCount": 5, + "InvalidCount": 68, "ObjectNames": "humanresources.department, humanresources.employee, humanresources.employeedepartmenthistory, humanresources.employeepayhistory, humanresources.jobcandidate, humanresources.shift, person.address, person.businessentityaddress, person.countryregion, person.emailaddress, person.person, person.personphone, person.phonenumbertype, person.stateprovince, person.addresstype, person.businessentity, person.businessentitycontact, person.contacttype, person.password, production.billofmaterials, production.culture, production.document, production.illustration, production.location, production.product, production.productcategory, production.productcosthistory, production.productdescription, production.productdocument, production.productinventory, production.productlistpricehistory, production.productmodel, 
production.productmodelillustration, production.productmodelproductdescriptionculture, production.productphoto, production.productproductphoto, production.productreview, production.productsubcategory, production.scrapreason, production.transactionhistory, production.transactionhistoryarchive, production.unitmeasure, production.workorder, production.workorderrouting, purchasing.purchaseorderdetail, purchasing.purchaseorderheader, purchasing.productvendor, purchasing.shipmethod, purchasing.vendor, sales.customer, sales.creditcard, sales.currencyrate, sales.countryregioncurrency, sales.currency, sales.personcreditcard, sales.store, sales.shoppingcartitem, sales.specialoffer, sales.salesorderdetail, sales.salesorderheader, sales.salesorderheadersalesreason, sales.specialofferproduct, sales.salesperson, sales.salespersonquotahistory, sales.salesreason, sales.salesterritory, sales.salesterritoryhistory, sales.salestaxrate" }, { @@ -59,7 +57,7 @@ { "ObjectType": "VIEW", "TotalCount": 87, - "InvalidCount": 0, + "InvalidCount": 8, "ObjectNames": "hr.d, hr.e, hr.edh, hr.eph, hr.jc, hr.s, humanresources.vemployee, humanresources.vemployeedepartment, humanresources.vemployeedepartmenthistory, humanresources.vjobcandidate, humanresources.vjobcandidateeducation, humanresources.vjobcandidateemployment, pe.a, pe.at, pe.be, pe.bea, pe.bec, pe.cr, pe.ct, pe.e, pe.p, pe.pa, pe.pnt, pe.pp, pe.sp, person.vadditionalcontactinfo, pr.bom, pr.c, pr.d, pr.i, pr.l, pr.p, pr.pc, pr.pch, pr.pd, pr.pdoc, pr.pi, pr.plph, pr.pm, pr.pmi, pr.pmpdc, pr.pp, pr.ppp, pr.pr, pr.psc, pr.sr, pr.th, pr.tha, pr.um, pr.w, pr.wr, production.vproductmodelcatalogdescription, production.vproductmodelinstructions, pu.pod, pu.poh, pu.pv, pu.sm, pu.v, purchasing.vvendorwithaddresses, purchasing.vvendorwithcontacts, sa.c, sa.cc, sa.cr, sa.crc, sa.cu, sa.pcc, sa.s, sa.sci, sa.so, sa.sod, sa.soh, sa.sohsr, sa.sop, sa.sp, sa.spqh, sa.sr, sa.st, sa.sth, sa.tr, sales.vindividualcustomer, sales.vpersondemographics, 
sales.vsalesperson, sales.vsalespersonsalesbyfiscalyears, sales.vsalespersonsalesbyfiscalyearsdata, sales.vstorewithaddresses, sales.vstorewithcontacts, sales.vstorewithdemographics" }, { @@ -80,7 +78,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. Refer link for more details.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -91,7 +90,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. Refer link for more details.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -102,7 +102,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. 
Refer link for more details.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -113,7 +114,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. Refer link for more details.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -124,7 +126,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. 
Refer link for more details.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -135,7 +138,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. Refer link for more details.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -146,7 +150,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. 
Refer link for more details.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -157,7 +162,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -168,7 +174,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -179,7 +186,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -190,7 +198,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -201,7 +210,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -212,7 +222,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -223,7 +234,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -234,7 +246,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -245,7 +258,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { 
"IssueType": "unsupported_features", @@ -256,7 +270,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -267,7 +282,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -278,7 +294,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -289,7 +306,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", 
"Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -300,7 +318,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -311,7 +330,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -322,7 +342,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -333,7 +354,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -344,7 +366,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -355,7 +378,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -366,7 +390,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -377,7 +402,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -388,7 +414,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { 
"IssueType": "unsupported_features", @@ -399,7 +426,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -410,7 +438,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -421,7 +450,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -432,7 +462,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", 
"Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -443,7 +474,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -454,7 +486,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -465,7 +498,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -476,7 +510,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -487,7 +522,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -498,7 +534,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -509,7 +546,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -520,7 +558,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -531,7 +570,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { 
"IssueType": "unsupported_features", @@ -542,7 +582,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -553,7 +594,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -564,7 +606,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -575,7 +618,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", 
"Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -586,7 +630,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -597,7 +642,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -608,7 +654,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -619,7 +666,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -630,7 +678,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -641,7 +690,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -652,7 +702,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -663,7 +714,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -674,7 +726,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { 
"IssueType": "unsupported_features", @@ -685,7 +738,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -696,7 +750,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -707,7 +762,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -718,7 +774,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", 
"Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -729,7 +786,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -740,7 +798,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -751,7 +810,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -762,7 +822,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -773,7 +834,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -784,7 +846,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -795,7 +858,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -806,7 +870,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -817,7 +882,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { 
"IssueType": "unsupported_features", @@ -828,7 +894,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -839,7 +906,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -850,7 +918,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -861,7 +930,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", 
"Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -872,7 +942,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -883,7 +954,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -894,7 +966,104 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "humanresources.vjobcandidate", + "Reason": "XML Functions", + "SqlStatement": "CREATE VIEW humanresources.vjobcandidate AS\n SELECT jobcandidateid,\n businessentityid,\n ((xpath('/n:Resume/n:Name/n:Name.Prefix/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.Prefix\",\n ((xpath('/n:Resume/n:Name/n:Name.First/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.First\",\n ((xpath('/n:Resume/n:Name/n:Name.Middle/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.Middle\",\n ((xpath('/n:Resume/n:Name/n:Name.Last/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.Last\",\n ((xpath('/n:Resume/n:Name/n:Name.Suffix/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Name.Suffix\",\n ((xpath('/n:Resume/n:Skills/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying AS \"Skills\",\n ((xpath('n:Address/n:Addr.Type/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(30) AS \"Addr.Type\",\n 
((xpath('n:Address/n:Addr.Location/n:Location/n:Loc.CountryRegion/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(100) AS \"Addr.Loc.CountryRegion\",\n ((xpath('n:Address/n:Addr.Location/n:Location/n:Loc.State/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(100) AS \"Addr.Loc.State\",\n ((xpath('n:Address/n:Addr.Location/n:Location/n:Loc.City/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(100) AS \"Addr.Loc.City\",\n ((xpath('n:Address/n:Addr.PostalCode/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying(20) AS \"Addr.PostalCode\",\n ((xpath('/n:Resume/n:EMail/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying AS \"EMail\",\n ((xpath('/n:Resume/n:WebSite/text()'::text, resume, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[]))[1])::character varying AS \"WebSite\",\n modifieddate\n FROM humanresources.jobcandidate;", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/views/view.sql", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "humanresources.vjobcandidateeducation", + "Reason": "XML Functions", + "SqlStatement": "CREATE VIEW humanresources.vjobcandidateeducation AS\n SELECT jobcandidateid,\n ((xpath('/root/ns:Education/ns:Edu.Level/text()'::text, doc, 
'{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(50) AS \"Edu.Level\",\n (((xpath('/root/ns:Education/ns:Edu.StartDate/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(20))::date AS \"Edu.StartDate\",\n (((xpath('/root/ns:Education/ns:Edu.EndDate/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(20))::date AS \"Edu.EndDate\",\n ((xpath('/root/ns:Education/ns:Edu.Degree/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(50) AS \"Edu.Degree\",\n ((xpath('/root/ns:Education/ns:Edu.Major/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(50) AS \"Edu.Major\",\n ((xpath('/root/ns:Education/ns:Edu.Minor/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(50) AS \"Edu.Minor\",\n ((xpath('/root/ns:Education/ns:Edu.GPA/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(5) AS \"Edu.GPA\",\n ((xpath('/root/ns:Education/ns:Edu.GPAScale/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(5) AS \"Edu.GPAScale\",\n ((xpath('/root/ns:Education/ns:Edu.School/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(100) AS \"Edu.School\",\n ((xpath('/root/ns:Education/ns:Edu.Location/ns:Location/ns:Loc.CountryRegion/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(100) AS \"Edu.Loc.CountryRegion\",\n ((xpath('/root/ns:Education/ns:Edu.Location/ns:Location/ns:Loc.State/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(100) AS \"Edu.Loc.State\",\n ((xpath('/root/ns:Education/ns:Edu.Location/ns:Location/ns:Loc.City/text()'::text, doc, '{{ns,http://adventureworks.com}}'::text[]))[1])::character varying(100) AS \"Edu.Loc.City\"\n FROM ( SELECT unnesting.jobcandidateid,\n ((('\u003croot 
xmlns:ns=\"http://adventureworks.com\"\u003e'::text || ((unnesting.education)::character varying)::text) || '\u003c/root\u003e'::text))::xml AS doc\n FROM ( SELECT jobcandidate.jobcandidateid,\n unnest(xpath('/ns:Resume/ns:Education'::text, jobcandidate.resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])) AS education\n FROM humanresources.jobcandidate) unnesting) jc;", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/views/view.sql", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "humanresources.vjobcandidateemployment", + "Reason": "XML Functions", + "SqlStatement": "CREATE VIEW humanresources.vjobcandidateemployment AS\n SELECT jobcandidateid,\n ((unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.StartDate/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying(20))::date AS \"Emp.StartDate\",\n ((unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.EndDate/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying(20))::date AS \"Emp.EndDate\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.OrgName/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying(100) AS \"Emp.OrgName\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.JobTitle/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying(100) AS \"Emp.JobTitle\",\n 
(unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.Responsibility/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.Responsibility\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.FunctionCategory/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.FunctionCategory\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.IndustryCategory/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.IndustryCategory\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.Location/ns:Location/ns:Loc.CountryRegion/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.Loc.CountryRegion\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.Location/ns:Location/ns:Loc.State/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.Loc.State\",\n (unnest(xpath('/ns:Resume/ns:Employment/ns:Emp.Location/ns:Location/ns:Loc.City/text()'::text, resume, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/Resume}}'::text[])))::character varying AS \"Emp.Loc.City\"\n FROM humanresources.jobcandidate;", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/views/view.sql", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "person.vadditionalcontactinfo", + "Reason": "XML Functions", + "SqlStatement": "CREATE VIEW 
person.vadditionalcontactinfo AS\n SELECT p.businessentityid,\n p.firstname,\n p.middlename,\n p.lastname,\n (xpath('(act:telephoneNumber)[1]/act:number/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS telephonenumber,\n btrim((((xpath('(act:telephoneNumber)[1]/act:SpecialInstructions/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1])::character varying)::text) AS telephonespecialinstructions,\n (xpath('(act:homePostalAddress)[1]/act:Street/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS street,\n (xpath('(act:homePostalAddress)[1]/act:City/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS city,\n (xpath('(act:homePostalAddress)[1]/act:StateProvince/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS stateprovince,\n (xpath('(act:homePostalAddress)[1]/act:PostalCode/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS postalcode,\n (xpath('(act:homePostalAddress)[1]/act:CountryRegion/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS countryregion,\n (xpath('(act:homePostalAddress)[1]/act:SpecialInstructions/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS homeaddressspecialinstructions,\n (xpath('(act:eMail)[1]/act:eMailAddress/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS emailaddress,\n 
btrim((((xpath('(act:eMail)[1]/act:SpecialInstructions/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1])::character varying)::text) AS emailspecialinstructions,\n (xpath('((act:eMail)[1]/act:SpecialInstructions/act:telephoneNumber)[1]/act:number/text()'::text, additional.node, '{{act,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactTypes}}'::text[]))[1] AS emailtelephonenumber,\n p.rowguid,\n p.modifieddate\n FROM (person.person p\n LEFT JOIN ( SELECT person.businessentityid,\n unnest(xpath('/ci:AdditionalContactInfo'::text, person.additionalcontactinfo, '{{ci,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ContactInfo}}'::text[])) AS node\n FROM person.person\n WHERE (person.additionalcontactinfo IS NOT NULL)) additional ON ((p.businessentityid = additional.businessentityid)));", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/views/view.sql", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "production.vproductmodelcatalogdescription", + "Reason": "XML Functions", + "SqlStatement": "CREATE VIEW production.vproductmodelcatalogdescription AS\n SELECT productmodelid,\n name,\n ((xpath('/p1:ProductDescription/p1:Summary/html:p/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{html,http://www.w3.org/1999/xhtml}}'::text[]))[1])::character varying AS \"Summary\",\n ((xpath('/p1:ProductDescription/p1:Manufacturer/p1:Name/text()'::text, catalogdescription, 
'{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying AS manufacturer,\n ((xpath('/p1:ProductDescription/p1:Manufacturer/p1:Copyright/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(30) AS copyright,\n ((xpath('/p1:ProductDescription/p1:Manufacturer/p1:ProductURL/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS producturl,\n ((xpath('/p1:ProductDescription/p1:Features/wm:Warranty/wm:WarrantyPeriod/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wm,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelWarrAndMain}}'::text[]))[1])::character varying(256) AS warrantyperiod,\n ((xpath('/p1:ProductDescription/p1:Features/wm:Warranty/wm:Description/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wm,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelWarrAndMain}}'::text[]))[1])::character varying(256) AS warrantydescription,\n ((xpath('/p1:ProductDescription/p1:Features/wm:Maintenance/wm:NoOfYears/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wm,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelWarrAndMain}}'::text[]))[1])::character varying(256) AS noofyears,\n ((xpath('/p1:ProductDescription/p1:Features/wm:Maintenance/wm:Description/text()'::text, catalogdescription, 
'{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wm,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelWarrAndMain}}'::text[]))[1])::character varying(256) AS maintenancedescription,\n ((xpath('/p1:ProductDescription/p1:Features/wf:wheel/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying(256) AS wheel,\n ((xpath('/p1:ProductDescription/p1:Features/wf:saddle/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying(256) AS saddle,\n ((xpath('/p1:ProductDescription/p1:Features/wf:pedal/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying(256) AS pedal,\n ((xpath('/p1:ProductDescription/p1:Features/wf:BikeFrame/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying AS bikeframe,\n ((xpath('/p1:ProductDescription/p1:Features/wf:crankset/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription},{wf,http://www.adventure-works.com/schemas/OtherFeatures}}'::text[]))[1])::character varying(256) AS crankset,\n ((xpath('/p1:ProductDescription/p1:Picture/p1:Angle/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS pictureangle,\n 
((xpath('/p1:ProductDescription/p1:Picture/p1:Size/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS picturesize,\n ((xpath('/p1:ProductDescription/p1:Picture/p1:ProductPhotoID/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS productphotoid,\n ((xpath('/p1:ProductDescription/p1:Specifications/Material/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS material,\n ((xpath('/p1:ProductDescription/p1:Specifications/Color/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS color,\n ((xpath('/p1:ProductDescription/p1:Specifications/ProductLine/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS productline,\n ((xpath('/p1:ProductDescription/p1:Specifications/Style/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(256) AS style,\n ((xpath('/p1:ProductDescription/p1:Specifications/RiderExperience/text()'::text, catalogdescription, '{{p1,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelDescription}}'::text[]))[1])::character varying(1024) AS riderexperience,\n rowguid,\n modifieddate\n FROM production.productmodel\n WHERE (catalogdescription IS NOT NULL);", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/views/view.sql", + "Suggestion": "", + "GH": 
"https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "production.vproductmodelinstructions", + "Reason": "XML Functions", + "SqlStatement": "CREATE VIEW production.vproductmodelinstructions AS\n SELECT productmodelid,\n name,\n ((xpath('/ns:root/text()'::text, instructions, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelManuInstructions}}'::text[]))[1])::character varying AS instructions,\n (((xpath('@LocationID'::text, mfginstructions))[1])::character varying)::integer AS \"LocationID\",\n (((xpath('@SetupHours'::text, mfginstructions))[1])::character varying)::numeric(9,4) AS \"SetupHours\",\n (((xpath('@MachineHours'::text, mfginstructions))[1])::character varying)::numeric(9,4) AS \"MachineHours\",\n (((xpath('@LaborHours'::text, mfginstructions))[1])::character varying)::numeric(9,4) AS \"LaborHours\",\n (((xpath('@LotSize'::text, mfginstructions))[1])::character varying)::integer AS \"LotSize\",\n ((xpath('/step/text()'::text, step))[1])::character varying(1024) AS \"Step\",\n rowguid,\n modifieddate\n FROM ( SELECT locations.productmodelid,\n locations.name,\n locations.rowguid,\n locations.modifieddate,\n locations.instructions,\n locations.mfginstructions,\n unnest(xpath('step'::text, locations.mfginstructions)) AS step\n FROM ( SELECT productmodel.productmodelid,\n productmodel.name,\n productmodel.rowguid,\n productmodel.modifieddate,\n productmodel.instructions,\n unnest(xpath('/ns:root/ns:Location'::text, productmodel.instructions, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/ProductModelManuInstructions}}'::text[])) AS mfginstructions\n FROM production.productmodel) locations) pm;", + "FilePath": 
"/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/views/view.sql", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "sales.vpersondemographics", + "Reason": "XML Functions", + "SqlStatement": "CREATE VIEW sales.vpersondemographics AS\n SELECT businessentityid,\n (((xpath('n:TotalPurchaseYTD/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::money AS totalpurchaseytd,\n (((xpath('n:DateFirstPurchase/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::date AS datefirstpurchase,\n (((xpath('n:BirthDate/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::date AS birthdate,\n ((xpath('n:MaritalStatus/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(1) AS maritalstatus,\n ((xpath('n:YearlyIncome/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(30) AS yearlyincome,\n ((xpath('n:Gender/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(1) AS gender,\n (((xpath('n:TotalChildren/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::integer AS totalchildren,\n 
(((xpath('n:NumberChildrenAtHome/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::integer AS numberchildrenathome,\n ((xpath('n:Education/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(30) AS education,\n ((xpath('n:Occupation/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying(30) AS occupation,\n (((xpath('n:HomeOwnerFlag/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::boolean AS homeownerflag,\n (((xpath('n:NumberCarsOwned/text()'::text, demographics, '{{n,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/IndividualSurvey}}'::text[]))[1])::character varying)::integer AS numbercarsowned\n FROM person.person\n WHERE (demographics IS NOT NULL);", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/views/view.sql", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_features", + "ObjectType": "VIEW", + "ObjectName": "sales.vstorewithdemographics", + "Reason": "XML Functions", + "SqlStatement": "CREATE VIEW sales.vstorewithdemographics AS\n SELECT businessentityid,\n name,\n ((unnest(xpath('/ns:StoreSurvey/ns:AnnualSales/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::money AS \"AnnualSales\",\n ((unnest(xpath('/ns:StoreSurvey/ns:AnnualRevenue/text()'::text, 
demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::money AS \"AnnualRevenue\",\n (unnest(xpath('/ns:StoreSurvey/ns:BankName/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(50) AS \"BankName\",\n (unnest(xpath('/ns:StoreSurvey/ns:BusinessType/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(5) AS \"BusinessType\",\n ((unnest(xpath('/ns:StoreSurvey/ns:YearOpened/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::integer AS \"YearOpened\",\n (unnest(xpath('/ns:StoreSurvey/ns:Specialty/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(50) AS \"Specialty\",\n ((unnest(xpath('/ns:StoreSurvey/ns:SquareFeet/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::integer AS \"SquareFeet\",\n (unnest(xpath('/ns:StoreSurvey/ns:Brands/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(30) AS \"Brands\",\n (unnest(xpath('/ns:StoreSurvey/ns:Internet/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying(30) AS \"Internet\",\n ((unnest(xpath('/ns:StoreSurvey/ns:NumberEmployees/text()'::text, demographics, '{{ns,http://schemas.microsoft.com/sqlserver/2004/07/adventure-works/StoreSurvey}}'::text[])))::character varying)::integer AS \"NumberEmployees\"\n FROM sales.store;", + "FilePath": 
"/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/adventureworks/export-dir/schema/views/view.sql", + "Suggestion": "", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/1043", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null } ] } diff --git a/migtests/tests/pg/assessment-report-test-uqc/cleanup-db b/migtests/tests/pg/assessment-report-test-uqc/cleanup-db new file mode 100755 index 0000000000..2793da7cec --- /dev/null +++ b/migtests/tests/pg/assessment-report-test-uqc/cleanup-db @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -e +set -x + +source ${SCRIPTS}/functions.sh + + +echo "Deleting ${SOURCE_DB_NAME} database on source" +run_psql postgres "DROP DATABASE ${SOURCE_DB_NAME};" \ No newline at end of file diff --git a/migtests/tests/pg/assessment-report-test-uqc/env.sh b/migtests/tests/pg/assessment-report-test-uqc/env.sh new file mode 100644 index 0000000000..0d09ece1c9 --- /dev/null +++ b/migtests/tests/pg/assessment-report-test-uqc/env.sh @@ -0,0 +1,3 @@ +export SOURCE_DB_TYPE="postgresql" +export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"pg_assessment_report_uqc"} +export SOURCE_DB_SCHEMA="sales,analytics" diff --git a/migtests/tests/pg/assessment-report-test-uqc/expectedAssessmentReport.json b/migtests/tests/pg/assessment-report-test-uqc/expectedAssessmentReport.json new file mode 100644 index 0000000000..29b451e198 --- /dev/null +++ b/migtests/tests/pg/assessment-report-test-uqc/expectedAssessmentReport.json @@ -0,0 +1,308 @@ +{ + "VoyagerVersion": "IGNORED", + "TargetDBVersion": "IGNORED", + "MigrationComplexity": "MEDIUM", + "SchemaSummary": { + "Description": "Objects that will be created on the target YugabyteDB.", + "DbName": "pg_assessment_report_uqc", + "SchemaNames": [ + "sales", + "analytics" + ], + "DbVersion": "14.13 (Ubuntu 14.13-1.pgdg20.04+1)", + "DatabaseObjects": [ + { + "ObjectType": "SCHEMA", + "TotalCount": 2, + 
"InvalidCount": 0, + "ObjectNames": "analytics, sales" + }, + { + "ObjectType": "EXTENSION", + "TotalCount": 1, + "InvalidCount": 0, + "ObjectNames": "pg_stat_statements" + }, + { + "ObjectType": "TABLE", + "TotalCount": 7, + "InvalidCount": 2, + "ObjectNames": "analytics.metrics, sales.orders, sales.test_json_chk, sales.events, sales.json_data, sales.customer_account, sales.recent_transactions" + }, + { + "ObjectType": "SEQUENCE", + "TotalCount": 1, + "InvalidCount":0, + "ObjectNames": "sales.recent_transactions_transaction_id_seq" + + }, + { + "ObjectType": "VIEW", + "TotalCount": 3, + "InvalidCount": 3, + "ObjectNames": "sales.employ_depart_view, sales.event_analysis_view, sales.event_analysis_view2" + }, + { + "ObjectType": "FUNCTION", + "TotalCount": 1, + "InvalidCount": 1, + "ObjectNames": "sales.get_user_info" + } + ] + }, + "Sizing": { + "SizingRecommendation": { + "ColocatedTables": [ + "sales.orders", + "analytics.metrics", + "sales.customer_account", + "sales.recent_transactions", + "sales.events", + "sales.json_data", + "sales.test_json_chk" + ], + "ColocatedReasoning": "Recommended instance type with 4 vCPU and 16 GiB memory could fit 7 objects (7 tables/materialized views and 0 explicit/implicit indexes) with 0.00 MB size and throughput requirement of 0 reads/sec and 0 writes/sec as colocated. 
Non leaf partition tables/indexes and unsupported tables/indexes were not considered.", + "ShardedTables": null, + "NumNodes": 3, + "VCPUsPerInstance": 4, + "MemoryPerInstance": 16, + "OptimalSelectConnectionsPerNode": 8, + "OptimalInsertConnectionsPerNode": 12, + "EstimatedTimeInMinForImport": 1, + "ParallelVoyagerJobs": 1 + }, + "FailureReasoning": "" + }, + "UnsupportedDataTypes": [ + { + "SchemaName": "sales", + "TableName": "event_analysis_view", + "ColumnName": "all_event_ranges", + "DataType": "datemultirange" + } + ], + "UnsupportedDataTypesDesc": "Data types of the source database that are not supported on the target YugabyteDB.", + "UnsupportedFeatures": [ + { + "FeatureName": "Aggregate Functions", + "Objects": [ + { + "ObjectName": "sales.event_analysis_view", + "SqlStatement": "CREATE VIEW sales.event_analysis_view AS\n SELECT range_agg(event_range) AS all_event_ranges\n FROM sales.events;" + }, + { + "ObjectName": "sales.event_analysis_view2", + "SqlStatement": "CREATE VIEW sales.event_analysis_view2 AS\n SELECT range_intersect_agg(event_range) AS overlapping_range\n FROM sales.events;" + }, + { + "ObjectName": "sales.employ_depart_view", + "SqlStatement": "CREATE VIEW sales.employ_depart_view AS\n SELECT any_value(name) AS any_employee\n FROM public.employees;" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Jsonb Subscripting", + "Objects": [ + { + "ObjectName": "sales.test_json_chk", + "SqlStatement": "CREATE TABLE sales.test_json_chk (\n id integer,\n name text,\n email text,\n active text,\n data jsonb,\n CONSTRAINT test_json_chk_data_check CHECK ((data['key'::text] \u003c\u003e '{}'::jsonb))\n);" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#jsonb-subscripting", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Json Type Predicate", + 
"Objects": [ + { + "ObjectName": "sales.json_data", + "SqlStatement": "CREATE TABLE sales.json_data (\n id integer NOT NULL,\n array_column text,\n unique_keys_column text,\n CONSTRAINT json_data_array_column_check CHECK ((array_column IS JSON ARRAY)),\n CONSTRAINT json_data_unique_keys_column_check CHECK ((unique_keys_column IS JSON WITH UNIQUE KEYS))\n);" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + } + ], + "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", + "TableIndexStats": [ + { + "SchemaName": "sales", + "ObjectName": "customer_account", + "RowCount": 4, + "ColumnCount": 2, + "Reads": 7, + "Writes": 6, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "sales", + "ObjectName": "recent_transactions", + "RowCount": 3, + "ColumnCount": 3, + "Reads": 3, + "Writes": 3, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "sales", + "ObjectName": "test_json_chk", + "RowCount": 2, + "ColumnCount": 5, + "Reads": 6, + "Writes": 2, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "sales", + "ObjectName": "orders", + "RowCount": 2, + "ColumnCount": 3, + "Reads": 0, + "Writes": 2, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "sales", + "ObjectName": "events", + "RowCount": 3, + "ColumnCount": 2, + "Reads": 6, + "Writes": 3, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + 
"SizeInBytes": 8192 + }, + { + "SchemaName": "sales", + "ObjectName": "json_data", + "RowCount": 0, + "ColumnCount": 3, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "analytics", + "ObjectName": "metrics", + "RowCount": 2, + "ColumnCount": 3, + "Reads": 2, + "Writes": 2, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + } + ], + "Notes": null, + "MigrationCaveats": null, + "UnsupportedQueryConstructs": [ + { + "ConstructTypeName": "Advisory Locks", + "Query": "SELECT metric_name, pg_advisory_lock(metric_id)\nFROM analytics.metrics\nWHERE metric_value \u003e $1", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "Aggregate Functions", + "Query": "SELECT range_intersect_agg(event_range) AS intersection_of_ranges\nFROM sales.events", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "Aggregate Functions", + "Query": "SELECT range_agg(event_range) AS union_of_ranges\nFROM sales.events", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "Aggregate Functions", + "Query": "SELECT\n any_value(name) AS any_employee\n FROM employees", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "Merge Statement", + "Query": "MERGE INTO sales.customer_account ca\nUSING sales.recent_transactions t \nON 
t.customer_id = ca.customer_id\nWHEN MATCHED THEN\n UPDATE SET balance = balance + transaction_value\nWHEN NOT MATCHED THEN\n INSERT (customer_id, balance)\n VALUES (t.customer_id, t.transaction_value)", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#merge-command", + "MinimumVersionsFixedIn": null + }, + { + + "ConstructTypeName": "Jsonb Subscripting", + "Query": "SELECT \n data,\n data[$1] AS name, \n (data[$2]) as active\nFROM sales.test_json_chk", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#jsonb-subscripting", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "Jsonb Subscripting", + "Query": "SELECT (sales.get_user_info($1))[$2] AS user_info", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#jsonb-subscripting", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "Jsonb Subscripting", + "Query": "SELECT (jsonb_build_object($1, $2, $3, $4, $5, $6) || $7)[$8] AS json_obj", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#jsonb-subscripting", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "Jsonb Subscripting", + "Query": "SELECT ($1 :: jsonb)[$2][$3] as b", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#jsonb-subscripting", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "Json Type Predicate", + "Query": "SELECT * \nFROM sales.json_data\nWHERE array_column IS JSON ARRAY", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + } + ], + "UnsupportedPlPgSqlObjects": [ + { + "FeatureName": "Jsonb Subscripting", + "Objects": [ + { + "ObjectType": "FUNCTION", + "ObjectName": "sales.get_user_info", + "SqlStatement": "SELECT\n data,\n data['name'] AS name,\n (data['active']) as active\n 
FROM sales.test_json_chk;" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#jsonb-subscripting", + "MinimumVersionsFixedIn": null + } + ] +} \ No newline at end of file diff --git a/migtests/tests/pg/assessment-report-test-uqc/fix-schema b/migtests/tests/pg/assessment-report-test-uqc/fix-schema new file mode 100755 index 0000000000..e69de29bb2 diff --git a/migtests/tests/pg/assessment-report-test-uqc/init-db b/migtests/tests/pg/assessment-report-test-uqc/init-db new file mode 100755 index 0000000000..c34c7fd127 --- /dev/null +++ b/migtests/tests/pg/assessment-report-test-uqc/init-db @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -e +set -x + +source ${SCRIPTS}/functions.sh + +echo "Creating ${SOURCE_DB_NAME} database on source" +run_psql postgres "DROP DATABASE IF EXISTS ${SOURCE_DB_NAME};" +run_psql postgres "CREATE DATABASE ${SOURCE_DB_NAME};" + +echo "Initialising source database." + +run_psql "${SOURCE_DB_NAME}" "\i pg_assessment_report_uqc.sql" + +run_psql "${SOURCE_DB_NAME}" "\i unsupported_query_constructs.sql" + +echo "End of init-db script" \ No newline at end of file diff --git a/migtests/tests/pg/assessment-report-test-uqc/pg_assessment_report_uqc.sql b/migtests/tests/pg/assessment-report-test-uqc/pg_assessment_report_uqc.sql new file mode 100644 index 0000000000..bb31768d8b --- /dev/null +++ b/migtests/tests/pg/assessment-report-test-uqc/pg_assessment_report_uqc.sql @@ -0,0 +1,146 @@ +-- Create multiple schemas +CREATE SCHEMA public; +CREATE SCHEMA sales; +CREATE SCHEMA hr; +CREATE SCHEMA analytics; + +-- Create tables in each schema +CREATE TABLE public.employees ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + name TEXT, + department_id INT +); + +CREATE TABLE sales.orders ( + order_id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + customer_id INT, + amount NUMERIC +); + +CREATE TABLE hr.departments ( + department_id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + department_name TEXT, + location 
TEXT +); + +CREATE TABLE analytics.metrics ( + metric_id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + metric_name TEXT, + metric_value NUMERIC +); + +-- Create a view in public schema +CREATE VIEW public.employee_view AS +SELECT e.id, e.name, d.department_name +FROM public.employees e +JOIN hr.departments d ON e.department_id = d.department_id; + + +-- Insert some sample data +INSERT INTO hr.departments (department_name, location) VALUES ('Engineering', 'Building A'); +INSERT INTO hr.departments (department_name, location) VALUES ('Sales', 'Building B'); +INSERT INTO public.employees (name, department_id) VALUES ('Alice', 1), ('Bob', 1), ('Charlie', 2); +INSERT INTO sales.orders (customer_id, amount) VALUES (101, 500.00), (102, 1200.00); +INSERT INTO analytics.metrics (metric_name, metric_value) VALUES ('ConversionRate', 0.023), ('ChurnRate', 0.05); + +create view sales.employ_depart_view AS SELECT + any_value(name) AS any_employee + FROM employees; + +CREATE TABLE sales.customer_account ( + customer_id INT PRIMARY KEY, + balance NUMERIC(10, 2) NOT NULL +); + +INSERT INTO sales.customer_account (customer_id, balance) +VALUES + (1, 100.00), + (2, 200.00), + (3, 300.00); + +CREATE TABLE sales.recent_transactions ( + transaction_id SERIAL PRIMARY KEY, + customer_id INT NOT NULL, + transaction_value NUMERIC(10, 2) NOT NULL +); + +INSERT INTO sales.recent_transactions (customer_id, transaction_value) +VALUES + (1, 50.00), + (3, -25.00), + (4, 150.00); +CREATE TABLE sales.test_json_chk ( + id int, + name text, + email text, + active text, + data jsonb, + CHECK (data['key']<>'{}') +); + +INSERT INTO sales.test_json_chk (id, name, email, active, data) +VALUES (1, 'John Doe', 'john@example.com', 'Y', jsonb_build_object('key', 'value', 'name', 'John Doe', 'active', 'Y')); + +INSERT INTO sales.test_json_chk (id, name, email, active, data) +VALUES (2, 'Jane Smith', 'jane@example.com', 'N', jsonb_build_object('key', 'value', 'name', 'Jane Smith', 'active', 'N')); + +CREATE 
OR REPLACE FUNCTION sales.get_user_info(user_id INT) +RETURNS JSONB AS $$ +BEGIN + PERFORM + data, + data['name'] AS name, + (data['active']) as active + FROM sales.test_json_chk; + + RETURN ( + SELECT jsonb_build_object( + 'id', id, + 'name', name, + 'email', email, + 'active', active + ) + FROM sales.test_json_chk + WHERE id = user_id + ); +END; +$$ LANGUAGE plpgsql; +CREATE TABLE sales.events ( + id int PRIMARY KEY, + event_range daterange +); + +-- Insert some ranges +INSERT INTO sales.events (id, event_range) VALUES + (1,'[2024-01-01, 2024-01-10]'::daterange), + (2,'[2024-01-05, 2024-01-15]'::daterange), + (3,'[2024-01-20, 2024-01-25]'::daterange); + +CREATE VIEW sales.event_analysis_view AS +SELECT + range_agg(event_range) AS all_event_ranges +FROM + sales.events; + +CREATE VIEW sales.event_analysis_view2 AS +SELECT + range_intersect_agg(event_range) AS overlapping_range +FROM + sales.events; + +-- PG 16 and above feature +CREATE TABLE sales.json_data ( + id int PRIMARY KEY, + array_column TEXT CHECK (array_column IS JSON ARRAY), + unique_keys_column TEXT CHECK (unique_keys_column IS JSON WITH UNIQUE KEYS) +); + +INSERT INTO public.json_data ( + id, data_column, object_column, array_column, scalar_column, unique_keys_column +) VALUES ( + 1, '{"key": "value"}', + 2, '{"name": "John", "age": 30}', + 3, '[1, 2, 3, 4]', + 4, '"hello"', + 5, '{"uniqueKey1": "value1", "uniqueKey2": "value2"}' +); diff --git a/migtests/tests/pg/assessment-report-test-uqc/unsupported_query_constructs.sql b/migtests/tests/pg/assessment-report-test-uqc/unsupported_query_constructs.sql new file mode 100644 index 0000000000..9133844f14 --- /dev/null +++ b/migtests/tests/pg/assessment-report-test-uqc/unsupported_query_constructs.sql @@ -0,0 +1,61 @@ +-- Unsupported Query Constructs +DROP EXTENSION IF EXISTS pg_stat_statements; +CREATE EXTENSION pg_stat_statements; +SELECT pg_stat_statements_reset(); +SELECT * FROM pg_stat_statements; + + +-- 1) System columns usage (public schema) +SELECT 
name, xmin FROM public.employees WHERE id = 1; + +-- 2) Advisory locks (hr schema) +SELECT hr.departments.department_name, pg_advisory_lock(hr.departments.department_id) +FROM hr.departments +WHERE department_name = 'Engineering'; + +-- 3) XML function usage (public schema) +SELECT xmlelement(name "employee_data", name) AS emp_xml +FROM public.employees; + +-- 4) Advisory locks (analytics schema) +SELECT metric_name, pg_advisory_lock(metric_id) +FROM analytics.metrics +WHERE metric_value > 0.02; + +-- Aggregate functions UQC NOT REPORTING as it needs PG16 upgrade in pipeline from PG15 +SELECT + any_value(name) AS any_employee + FROM employees; + +MERGE INTO sales.customer_account ca +USING sales.recent_transactions t +ON t.customer_id = ca.customer_id +WHEN MATCHED THEN + UPDATE SET balance = balance + transaction_value +WHEN NOT MATCHED THEN + INSERT (customer_id, balance) + VALUES (t.customer_id, t.transaction_value); + +select * from sales.customer_account ; +SELECT (sales.get_user_info(2))['name'] AS user_info; + +SELECT (jsonb_build_object('name', 'PostgreSQL', 'version', 17, 'open_source', TRUE) || '{"key": "value2"}')['name'] AS json_obj; + +SELECT + data, + data['name'] AS name, + (data['active']) as active +FROM sales.test_json_chk; + +SELECT ('{"a": { "b": {"c": "1"}}}' :: jsonb)['a']['b'] as b; +--PG15 +SELECT range_agg(event_range) AS union_of_ranges +FROM sales.events; + +SELECT range_intersect_agg(event_range) AS intersection_of_ranges +FROM sales.events; + +-- -- PG 16 and above feature +SELECT * +FROM sales.json_data +WHERE array_column IS JSON ARRAY; diff --git a/migtests/tests/pg/assessment-report-test/expectedAssessmentReport.json b/migtests/tests/pg/assessment-report-test/expectedAssessmentReport.json index 9294d7ad78..d26b9969e8 100644 --- a/migtests/tests/pg/assessment-report-test/expectedAssessmentReport.json +++ b/migtests/tests/pg/assessment-report-test/expectedAssessmentReport.json @@ -1,5 +1,6 @@ { "VoyagerVersion": "IGNORED", + 
"TargetDBVersion": "IGNORED", "MigrationComplexity": "HIGH", "SchemaSummary": { "Description": "Objects that will be created on the target YugabyteDB.", @@ -19,9 +20,9 @@ }, { "ObjectType": "EXTENSION", - "TotalCount": 3, + "TotalCount": 4, "InvalidCount": 0, - "ObjectNames": "citext, pgcrypto, pg_stat_statements" + "ObjectNames": "citext, pgcrypto, pg_stat_statements, lo" }, { "ObjectType": "TYPE", @@ -37,28 +38,27 @@ }, { "ObjectType": "SEQUENCE", - "TotalCount": 26, + "TotalCount": 42, "InvalidCount": 0, - "ObjectNames": "public.\"Case_Sensitive_Columns_id_seq\", public.\"Mixed_Case_Table_Name_Test_id_seq\", public.\"Recipients_id_seq\", public.\"WITH_id_seq\", public.employees2_id_seq, public.ext_test_id_seq, public.mixed_data_types_table1_id_seq, public.mixed_data_types_table2_id_seq, public.orders2_id_seq, public.parent_table_id_seq, public.with_example1_id_seq, public.with_example2_id_seq, schema2.\"Case_Sensitive_Columns_id_seq\", schema2.\"Mixed_Case_Table_Name_Test_id_seq\", schema2.\"Recipients_id_seq\", schema2.\"WITH_id_seq\", schema2.employees2_id_seq, schema2.ext_test_id_seq, schema2.mixed_data_types_table1_id_seq, schema2.mixed_data_types_table2_id_seq, schema2.orders2_id_seq, schema2.parent_table_id_seq, schema2.with_example1_id_seq, schema2.with_example2_id_seq, test_views.view_table1_id_seq, test_views.view_table2_id_seq" + "ObjectNames": "public.\"Case_Sensitive_Columns_id_seq\", public.\"Mixed_Case_Table_Name_Test_id_seq\", public.\"Recipients_id_seq\", public.\"WITH_id_seq\", public.bigint_multirange_table_id_seq, public.date_multirange_table_id_seq, public.employees2_id_seq, public.employees_employee_id_seq, public.employeesforview_id_seq, public.ext_test_id_seq, public.int_multirange_table_id_seq, public.mixed_data_types_table1_id_seq, public.mixed_data_types_table2_id_seq, public.numeric_multirange_table_id_seq, public.orders2_id_seq, public.ordersentry_order_id_seq, public.parent_table_id_seq, public.timestamp_multirange_table_id_seq, 
public.timestamptz_multirange_table_id_seq, public.with_example1_id_seq, public.with_example2_id_seq, schema2.\"Case_Sensitive_Columns_id_seq\", schema2.\"Mixed_Case_Table_Name_Test_id_seq\", schema2.\"Recipients_id_seq\", schema2.\"WITH_id_seq\", schema2.bigint_multirange_table_id_seq, schema2.date_multirange_table_id_seq, schema2.employees2_id_seq, schema2.employeesforview_id_seq, schema2.ext_test_id_seq, schema2.int_multirange_table_id_seq, schema2.mixed_data_types_table1_id_seq, schema2.mixed_data_types_table2_id_seq, schema2.numeric_multirange_table_id_seq, schema2.orders2_id_seq, schema2.parent_table_id_seq, schema2.timestamp_multirange_table_id_seq, schema2.timestamptz_multirange_table_id_seq, schema2.with_example1_id_seq, schema2.with_example2_id_seq, test_views.view_table1_id_seq, test_views.view_table2_id_seq" }, { "ObjectType": "TABLE", - "TotalCount": 63, - "InvalidCount": 20, - "ObjectNames": "public.\"Case_Sensitive_Columns\", public.\"Mixed_Case_Table_Name_Test\", public.\"Recipients\", public.\"WITH\", public.audit, public.sales_region, public.boston, public.c, public.parent_table, public.child_table, public.citext_type, public.combined_tbl, public.documents, public.employees2, public.ext_test, public.foo, public.inet_type, public.london, public.mixed_data_types_table1, public.mixed_data_types_table2, public.orders, public.orders2, public.products, public.session_log, public.session_log1, public.session_log2, public.sydney, public.test_exclude_basic, public.test_jsonb, public.test_xml_type, public.ts_query_table, public.tt, public.with_example1, public.with_example2, schema2.\"Case_Sensitive_Columns\", schema2.\"Mixed_Case_Table_Name_Test\", schema2.\"Recipients\", schema2.\"WITH\", schema2.audit, schema2.sales_region, schema2.boston, schema2.c, schema2.parent_table, schema2.child_table, schema2.employees2, schema2.ext_test, schema2.foo, schema2.london, schema2.mixed_data_types_table1, schema2.mixed_data_types_table2, schema2.orders, 
schema2.orders2, schema2.products, schema2.session_log, schema2.session_log1, schema2.session_log2, schema2.sydney, schema2.test_xml_type, schema2.tt, schema2.with_example1, schema2.with_example2, test_views.view_table1, test_views.view_table2" + "TotalCount": 93, + "InvalidCount": 42, + "ObjectNames": "public.\"Case_Sensitive_Columns\", public.\"Mixed_Case_Table_Name_Test\", public.\"Recipients\", public.\"WITH\", public.audit, public.bigint_multirange_table, public.boston, public.c, public.child_table, public.citext_type, public.combined_tbl, public.date_multirange_table, public.documents, public.employees, public.employees2, public.employeescopyfromwhere, public.employeescopyonerror, public.employeesforview, public.ext_test, public.foo, public.inet_type, public.int_multirange_table, public.library_nested, public.london, public.mixed_data_types_table1, public.mixed_data_types_table2, public.numeric_multirange_table, public.orders, public.orders2, public.orders_lateral, public.ordersentry, public.parent_table, public.products, public.sales_region, public.sales_unique_nulls_not_distinct, public.sales_unique_nulls_not_distinct_alter, public.session_log, public.session_log1, public.session_log2, public.sydney, public.test_exclude_basic, public.test_jsonb, public.test_xml_type, public.timestamp_multirange_table, public.timestamptz_multirange_table, public.ts_query_table, public.tt, public.users_unique_nulls_distinct, public.users_unique_nulls_not_distinct, public.users_unique_nulls_not_distinct_index, public.with_example1, public.with_example2, schema2.\"Case_Sensitive_Columns\", schema2.\"Mixed_Case_Table_Name_Test\", schema2.\"Recipients\", schema2.\"WITH\", schema2.audit, schema2.bigint_multirange_table, schema2.boston, schema2.c, schema2.child_table, schema2.date_multirange_table, schema2.employees2, schema2.employeesforview, schema2.ext_test, schema2.foo, schema2.int_multirange_table, schema2.london, schema2.mixed_data_types_table1, 
schema2.mixed_data_types_table2, schema2.numeric_multirange_table, schema2.orders, schema2.orders2, schema2.parent_table, schema2.products, schema2.sales_region, schema2.sales_unique_nulls_not_distinct, schema2.sales_unique_nulls_not_distinct_alter, schema2.session_log, schema2.session_log1, schema2.session_log2, schema2.sydney, schema2.test_xml_type, schema2.timestamp_multirange_table, schema2.timestamptz_multirange_table, schema2.tt, schema2.users_unique_nulls_distinct, schema2.users_unique_nulls_not_distinct, schema2.users_unique_nulls_not_distinct_index, schema2.with_example1, schema2.with_example2, test_views.view_table1, test_views.view_table2" }, { "ObjectType": "INDEX", - "TotalCount": 24, - "InvalidCount": 20, - "ObjectNames": "idx1 ON public.combined_tbl, idx2 ON public.combined_tbl, idx3 ON public.combined_tbl, idx4 ON public.combined_tbl, idx5 ON public.combined_tbl, idx6 ON public.combined_tbl, idx7 ON public.combined_tbl, idx_array ON public.documents, idx_box_data ON public.mixed_data_types_table1, idx_box_data_brin ON public.mixed_data_types_table1, idx_citext ON public.citext_type, idx_citext1 ON public.citext_type, idx_citext2 ON public.citext_type, idx_inet ON public.inet_type, idx_inet1 ON public.inet_type, idx_json ON public.test_jsonb, idx_json2 ON public.test_jsonb, idx_point_data ON public.mixed_data_types_table1, idx_valid ON public.test_jsonb, tsquery_idx ON public.ts_query_table, tsvector_idx ON public.documents, idx_box_data ON schema2.mixed_data_types_table1, idx_box_data_spgist ON schema2.mixed_data_types_table1, idx_point_data ON schema2.mixed_data_types_table1" + "TotalCount": 28, + "InvalidCount": 24, + "ObjectNames": "idx1 ON public.combined_tbl, idx2 ON public.combined_tbl, idx3 ON public.combined_tbl, idx4 ON public.combined_tbl, idx5 ON public.combined_tbl, idx6 ON public.combined_tbl, idx7 ON public.combined_tbl, idx8 ON public.combined_tbl, idx9 ON public.combined_tbl, idx_array ON public.documents, idx_box_data ON 
public.mixed_data_types_table1, idx_box_data ON schema2.mixed_data_types_table1, idx_box_data_brin ON public.mixed_data_types_table1, idx_box_data_spgist ON schema2.mixed_data_types_table1, idx_citext ON public.citext_type, idx_citext1 ON public.citext_type, idx_citext2 ON public.citext_type, idx_inet ON public.inet_type, idx_inet1 ON public.inet_type, idx_json ON public.test_jsonb, idx_json2 ON public.test_jsonb, idx_point_data ON public.mixed_data_types_table1, idx_point_data ON schema2.mixed_data_types_table1, idx_valid ON public.test_jsonb, tsquery_idx ON public.ts_query_table, tsvector_idx ON public.documents, users_unique_nulls_not_distinct_index_email ON public.users_unique_nulls_not_distinct_index, users_unique_nulls_not_distinct_index_email ON schema2.users_unique_nulls_not_distinct_index" }, { "ObjectType": "FUNCTION", - "TotalCount": 7, - "InvalidCount": 0, - "ObjectNames": "public.auditlogfunc, public.check_sales_region, public.prevent_update_shipped_without_date, public.total, schema2.auditlogfunc, schema2.prevent_update_shipped_without_date, schema2.total" - }, + "TotalCount": 11, + "InvalidCount": 4, + "ObjectNames": "public.manage_large_object, public.auditlogfunc, public.check_sales_region, public.prevent_update_shipped_without_date, public.process_combined_tbl, public.process_order, public.total, schema2.auditlogfunc, schema2.prevent_update_shipped_without_date, schema2.process_order, schema2.total" }, { "ObjectType": "AGGREGATE", "TotalCount": 2, @@ -67,21 +67,21 @@ }, { "ObjectType": "PROCEDURE", - "TotalCount": 2, - "InvalidCount": 0, - "ObjectNames": "public.tt_insert_data, schema2.tt_insert_data" + "TotalCount": 3, + "InvalidCount": 1, + "ObjectNames": "public.tt_insert_data, public.update_combined_tbl_data, schema2.tt_insert_data" }, { "ObjectType": "VIEW", - "TotalCount": 6, - "InvalidCount": 2, - "ObjectNames": "public.sales_employees, schema2.sales_employees, test_views.v1, test_views.v2, test_views.v3, test_views.v4" + "TotalCount": 10, 
+ "InvalidCount": 6, + "ObjectNames": "public.ordersentry_view, public.sales_employees, public.top_employees_view, public.view_explicit_security_invoker, schema2.sales_employees, schema2.top_employees_view, test_views.v1, test_views.v2, test_views.v3, test_views.v4" }, { "ObjectType": "TRIGGER", - "TotalCount": 3, - "InvalidCount": 1, - "ObjectNames": "audit_trigger ON public.tt, before_sales_region_insert_update ON public.sales_region, audit_trigger ON schema2.tt" + "TotalCount": 4, + "InvalidCount": 4, + "ObjectNames": "t_raster ON public.combined_tbl, audit_trigger ON public.tt, before_sales_region_insert_update ON public.sales_region, audit_trigger ON schema2.tt" }, { "ObjectType": "MVIEW", @@ -100,6 +100,12 @@ "TotalCount": 4, "InvalidCount": 2, "ObjectNames": "policy_test_fine ON public.test_exclude_basic, policy_test_fine_2 ON public.employees2, policy_test_report ON public.test_xml_type, policy_test_report ON schema2.test_xml_type" + }, + { + "ObjectType": "COLLATION", + "TotalCount": 2, + "InvalidCount": 1, + "ObjectNames": "schema2.ignore_accents, public.\"numeric\"" } ] }, @@ -122,6 +128,7 @@ "schema2.child_table", "schema2.parent_table", "schema2.tbl_unlogged", + "public.ordersentry", "schema2.orders2", "schema2.tt", "schema2.ext_test", @@ -161,14 +168,43 @@ "schema2.products", "schema2.foo", "schema2.Case_Sensitive_Columns", + "schema2.employeesforview", "schema2.with_example1", "test_views.xyz_mview", "test_views.view_table2", "test_views.mv1", "test_views.abc_mview", - "test_views.view_table1" + "test_views.view_table1", + "public.library_nested", + "public.orders_lateral", + "public.employees", + "public.employeescopyfromwhere", + "public.employeescopyonerror", + "public.bigint_multirange_table", + "public.date_multirange_table", + "public.int_multirange_table", + "public.numeric_multirange_table", + "public.timestamp_multirange_table", + "public.timestamptz_multirange_table", + "schema2.bigint_multirange_table", + "schema2.date_multirange_table", + 
"schema2.int_multirange_table", + "schema2.numeric_multirange_table", + "schema2.timestamp_multirange_table", + "schema2.timestamptz_multirange_table", + "public.employeesforview", + "schema2.users_unique_nulls_distinct", + "schema2.users_unique_nulls_not_distinct", + "schema2.sales_unique_nulls_not_distinct", + "public.users_unique_nulls_not_distinct_index", + "schema2.users_unique_nulls_not_distinct_index", + "schema2.sales_unique_nulls_not_distinct_alter", + "public.users_unique_nulls_distinct", + "public.users_unique_nulls_not_distinct", + "public.sales_unique_nulls_not_distinct", + "public.sales_unique_nulls_not_distinct_alter" ], - "ColocatedReasoning": "Recommended instance type with 4 vCPU and 16 GiB memory could fit 69 objects (61 tables/materialized views and 8 explicit/implicit indexes) with 0.00 MB size and throughput requirement of 0 reads/sec and 0 writes/sec as colocated. Rest 24 objects (5 tables/materialized views and 19 explicit/implicit indexes) with 0.00 MB size and throughput requirement of 0 reads/sec and 0 writes/sec need to be migrated as range partitioned tables. Non leaf partition tables/indexes and unsupported tables/indexes were not considered.", + "ColocatedReasoning": "Recommended instance type with 4 vCPU and 16 GiB memory could fit 109 objects (91 tables/materialized views and 18 explicit/implicit indexes) with 0.00 MB size and throughput requirement of 0 reads/sec and 0 writes/sec as colocated. Rest 28 objects (5 tables/materialized views and 23 explicit/implicit indexes) with 0.00 MB size and throughput requirement of 0 reads/sec and 0 writes/sec need to be migrated as range partitioned tables. 
Non leaf partition tables/indexes and unsupported tables/indexes were not considered.", "ShardedTables": [ "public.combined_tbl", "public.citext_type", @@ -193,6 +229,12 @@ "ColumnName": "lsn", "DataType": "pg_lsn" }, + { + "SchemaName": "public", + "TableName": "combined_tbl", + "ColumnName": "raster", + "DataType": "lo" + }, { "SchemaName": "public", "TableName": "mixed_data_types_table1", @@ -228,6 +270,108 @@ "TableName": "test_xml_type", "ColumnName": "data", "DataType": "xml" + }, + { + "SchemaName": "public", + "TableName": "ordersentry_view", + "ColumnName": "order_xml", + "DataType": "xml" + }, + { + "SchemaName": "public", + "TableName": "ordersentry_view", + "ColumnName": "summary_xml", + "DataType": "xml" + }, + { + "SchemaName": "public", + "TableName": "ordersentry_view", + "ColumnName": "transaction_id", + "DataType": "xid" + }, + { + "SchemaName": "public", + "TableName": "library_nested", + "ColumnName": "lib_data", + "DataType": "xml" + }, + { + "SchemaName": "public", + "TableName": "orders_lateral", + "ColumnName": "order_details", + "DataType": "xml" + }, + { + "SchemaName": "public", + "TableName": "date_multirange_table", + "ColumnName": "project_dates", + "DataType": "datemultirange" + }, + { + "SchemaName": "public", + "TableName": "numeric_multirange_table", + "ColumnName": "price_ranges", + "DataType": "nummultirange" + }, + { + "SchemaName": "public", + "TableName": "bigint_multirange_table", + "ColumnName": "value_ranges", + "DataType": "int8multirange" + }, + { + "SchemaName": "public", + "TableName": "int_multirange_table", + "ColumnName": "value_ranges", + "DataType": "int4multirange" + }, + { + "SchemaName": "public", + "TableName": "timestamp_multirange_table", + "ColumnName": "event_times", + "DataType": "tsmultirange" + }, + { + "SchemaName": "public", + "TableName": "timestamptz_multirange_table", + "ColumnName": "global_event_times", + "DataType": "tstzmultirange" + }, + { + "SchemaName": "schema2", + "TableName": 
"date_multirange_table", + "ColumnName": "project_dates", + "DataType": "datemultirange" + }, + { + "SchemaName": "schema2", + "TableName": "bigint_multirange_table", + "ColumnName": "value_ranges", + "DataType": "int8multirange" + }, + { + "SchemaName": "schema2", + "TableName": "int_multirange_table", + "ColumnName": "value_ranges", + "DataType": "int4multirange" + }, + { + "SchemaName": "schema2", + "TableName": "numeric_multirange_table", + "ColumnName": "price_ranges", + "DataType": "nummultirange" + }, + { + "SchemaName": "schema2", + "TableName": "timestamp_multirange_table", + "ColumnName": "event_times", + "DataType": "tsmultirange" + }, + { + "SchemaName": "schema2", + "TableName": "timestamptz_multirange_table", + "ColumnName": "global_event_times", + "DataType": "tstzmultirange" } ], "UnsupportedDataTypesDesc": "Data types of the source database that are not supported on the target YugabyteDB.", @@ -252,8 +396,9 @@ "SqlStatement": "CREATE INDEX idx_point_data ON schema2.mixed_data_types_table1 USING gist (point_data);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "BRIN indexes", "Objects": [ @@ -262,8 +407,9 @@ "SqlStatement": "CREATE INDEX idx_box_data_brin ON public.mixed_data_types_table1 USING brin (box_data);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "SPGIST indexes", "Objects": [ @@ -272,8 +418,9 @@ "SqlStatement": "CREATE INDEX 
idx_box_data_spgist ON schema2.mixed_data_types_table1 USING spgist (box_data);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Constraint triggers", "Objects": [ @@ -286,8 +433,9 @@ "SqlStatement": "CREATE CONSTRAINT TRIGGER enforce_shipped_date_constraint AFTER UPDATE ON schema2.orders2 NOT DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW WHEN ((((new.status)::text = 'shipped'::text) AND (new.shipped_date IS NULL))) EXECUTE FUNCTION schema2.prevent_update_shipped_without_date();" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#constraint-trigger-is-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#constraint-trigger-is-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Inherited tables", "Objects": [ @@ -300,8 +448,9 @@ "SqlStatement": "CREATE TABLE schema2.child_table (\n specific_column1 date\n)\nINHERITS (schema2.parent_table);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Tables with stored generated columns", "Objects": [ @@ -314,28 +463,9 @@ "SqlStatement": "CREATE TABLE schema2.employees2 (\n id integer NOT NULL,\n first_name character varying(50) NOT NULL,\n last_name character varying(50) NOT NULL,\n full_name character varying(101) GENERATED ALWAYS AS ((((first_name)::text || ' '::text) || (last_name)::text)) STORED,\n department character varying(50)\n);" } 
], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported" - }, - { - "FeatureName": "Conversion objects", - "Objects": [] - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - "FeatureName": "Disabling rule on table", - "Objects": [] - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "BEFORE ROW triggers on Partitioned tables", "Objects": [ @@ -344,19 +474,8 @@ "SqlStatement": "CREATE TRIGGER before_sales_region_insert_update BEFORE INSERT OR UPDATE ON public.sales_region FOR EACH ROW EXECUTE FUNCTION public.check_sales_region();" } ], - "DocsLink":"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#before-row-triggers-on-partitioned-tables" - }, - { - "FeatureName": "Clustering table on index", - "Objects": [] - }, - { - "FeatureName": "Storage parameters in DDLs", - "Objects": [] - }, - { - "FeatureName": "Extensions", - "Objects": [] + "DocsLink":"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#before-row-triggers-on-partitioned-tables", + "MinimumVersionsFixedIn": null }, { "FeatureName": "Exclusion constraints", @@ -366,8 +485,9 @@ "SqlStatement": "ALTER TABLE ONLY public.test_exclude_basic\n ADD CONSTRAINT no_same_name_address EXCLUDE USING btree (name WITH =, address WITH =);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported", + "MinimumVersionsFixedIn": null 
+ }, { "FeatureName": "Deferrable constraints", "Objects": [ @@ -380,22 +500,24 @@ "SqlStatement": "ALTER TABLE ONLY schema2.orders2\n ADD CONSTRAINT orders2_order_number_key UNIQUE (order_number) DEFERRABLE;" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#deferrable-constraint-on-constraints-other-than-foreign-keys-is-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#deferrable-constraint-on-constraints-other-than-foreign-keys-is-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "View with check option", "Objects": [ { "ObjectName": "public.sales_employees", - "SqlStatement": "CREATE VIEW public.sales_employees AS\n SELECT employees2.id,\n employees2.first_name,\n employees2.last_name,\n employees2.full_name\n FROM public.employees2\n WHERE ((employees2.department)::text = 'sales'::text)\n WITH CASCADED CHECK OPTION;" + "SqlStatement": "CREATE VIEW public.sales_employees AS\n SELECT id,\n first_name,\n last_name,\n full_name\n FROM public.employees2\n WHERE ((department)::text = 'sales'::text)\n WITH CASCADED CHECK OPTION;" }, { "ObjectName": "schema2.sales_employees", - "SqlStatement": "CREATE VIEW schema2.sales_employees AS\n SELECT employees2.id,\n employees2.first_name,\n employees2.last_name,\n employees2.full_name\n FROM schema2.employees2\n WHERE ((employees2.department)::text = 'sales'::text)\n WITH CASCADED CHECK OPTION;" + "SqlStatement": "CREATE VIEW schema2.sales_employees AS\n SELECT id,\n first_name,\n last_name,\n full_name\n FROM schema2.employees2\n WHERE ((department)::text = 'sales'::text)\n WITH CASCADED CHECK OPTION;" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#view-with-check-option-is-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#view-with-check-option-is-not-supported", + "MinimumVersionsFixedIn": 
null + }, { "FeatureName": "Index on complex datatypes", "Objects": [ @@ -454,123 +576,503 @@ { "ObjectName": "USER_DEFINED_TYPE: idx7 ON public.combined_tbl", "SqlStatement": "CREATE INDEX idx7 ON public.combined_tbl USING btree (address);" + }, + { + "ObjectName": "DATERANGE: idx8 ON public.combined_tbl", + "SqlStatement": "CREATE INDEX idx8 ON public.combined_tbl USING btree (d);" + }, + { + "ObjectName": "INTERVAL: idx9 ON public.combined_tbl", + "SqlStatement": "CREATE INDEX idx9 ON public.combined_tbl USING btree (inds3);" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Deterministic attribute in collation", + "Objects": [ + { + "ObjectName": "schema2.ignore_accents", + "SqlStatement": "CREATE COLLATION schema2.ignore_accents (provider = icu, deterministic = false, locale = 'und-u-kc-ks-level1');" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Primary / Unique key constraints on complex datatypes", + "Objects": [ + { + "ObjectName": "public.combined_tbl, constraint: (combined_tbl_bittv_key)", + "SqlStatement": "ALTER TABLE ONLY public.combined_tbl\n ADD CONSTRAINT combined_tbl_bittv_key UNIQUE (bittv);" + }, + { + "ObjectName": "public.combined_tbl, constraint: (uk)", + "SqlStatement": "ALTER TABLE ONLY public.combined_tbl\n ADD CONSTRAINT uk UNIQUE (lsn);" + }, + { + "ObjectName": "public.combined_tbl, constraint: (combined_tbl_pkey)", + "SqlStatement": "ALTER TABLE ONLY public.combined_tbl\n ADD CONSTRAINT combined_tbl_pkey PRIMARY KEY (id, arr_enum);" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, + { + 
"FeatureName": "System Columns", + "Objects": [ + { + "ObjectName": "public.ordersentry_view", + "SqlStatement": "CREATE VIEW public.ordersentry_view AS\n SELECT order_id,\n customer_name,\n product_name,\n quantity,\n price,\n XMLELEMENT(NAME \"OrderDetails\", XMLELEMENT(NAME \"Customer\", customer_name), XMLELEMENT(NAME \"Product\", product_name), XMLELEMENT(NAME \"Quantity\", quantity), XMLELEMENT(NAME \"TotalPrice\", (price * (quantity)::numeric))) AS order_xml,\n XMLCONCAT(XMLELEMENT(NAME \"Customer\", customer_name), XMLELEMENT(NAME \"Product\", product_name)) AS summary_xml,\n pg_try_advisory_lock((hashtext((customer_name || product_name)))::bigint) AS lock_acquired,\n ctid AS row_ctid,\n xmin AS transaction_id\n FROM public.ordersentry;" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#system-columns-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "XML Functions", + "Objects": [ + { + "ObjectName": "public.ordersentry_view", + "SqlStatement": "CREATE VIEW public.ordersentry_view AS\n SELECT order_id,\n customer_name,\n product_name,\n quantity,\n price,\n XMLELEMENT(NAME \"OrderDetails\", XMLELEMENT(NAME \"Customer\", customer_name), XMLELEMENT(NAME \"Product\", product_name), XMLELEMENT(NAME \"Quantity\", quantity), XMLELEMENT(NAME \"TotalPrice\", (price * (quantity)::numeric))) AS order_xml,\n XMLCONCAT(XMLELEMENT(NAME \"Customer\", customer_name), XMLELEMENT(NAME \"Product\", product_name)) AS summary_xml,\n pg_try_advisory_lock((hashtext((customer_name || product_name)))::bigint) AS lock_acquired,\n ctid AS row_ctid,\n xmin AS transaction_id\n FROM public.ordersentry;" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Advisory Locks", + "Objects": [ + { + "ObjectName": "public.ordersentry_view", + "SqlStatement": "CREATE VIEW 
public.ordersentry_view AS\n SELECT order_id,\n customer_name,\n product_name,\n quantity,\n price,\n XMLELEMENT(NAME \"OrderDetails\", XMLELEMENT(NAME \"Customer\", customer_name), XMLELEMENT(NAME \"Product\", product_name), XMLELEMENT(NAME \"Quantity\", quantity), XMLELEMENT(NAME \"TotalPrice\", (price * (quantity)::numeric))) AS order_xml,\n XMLCONCAT(XMLELEMENT(NAME \"Customer\", customer_name), XMLELEMENT(NAME \"Product\", product_name)) AS summary_xml,\n pg_try_advisory_lock((hashtext((customer_name || product_name)))::bigint) AS lock_acquired,\n ctid AS row_ctid,\n xmin AS transaction_id\n FROM public.ordersentry;" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Large Object Functions", + "Objects": [ + { + "ObjectName": "t_raster ON public.combined_tbl", + "SqlStatement": "CREATE TRIGGER t_raster BEFORE DELETE OR UPDATE ON public.combined_tbl FOR EACH ROW EXECUTE FUNCTION public.lo_manage('raster');" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#large-objects-and-its-functions-are-currently-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Foreign key constraint references partitioned table", + "Objects": [ + { + "ObjectName": "public.test_jsonb, constraint: (test_jsonb_id_region_fkey)", + "SqlStatement": "ALTER TABLE ONLY public.test_jsonb\n ADD CONSTRAINT test_jsonb_id_region_fkey FOREIGN KEY (id, region) REFERENCES public.sales_region(id, region);" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Regex Functions", + "Objects": [ + { + "ObjectName": "public.ordersentry", + "SqlStatement": "CREATE TABLE public.ordersentry (\n order_id integer NOT NULL,\n customer_name text NOT NULL,\n 
product_name text NOT NULL,\n quantity integer NOT NULL,\n price numeric(10,2) NOT NULL,\n processed_at timestamp without time zone,\n r integer DEFAULT regexp_count('This is an example. Another example. Example is a common word.'::text, 'example'::text)\n);" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "FETCH .. WITH TIES Clause", + "Objects": [ + { + "ObjectName": "public.top_employees_view", + "SqlStatement": "CREATE VIEW public.top_employees_view AS\n SELECT id,\n first_name,\n last_name,\n salary\n FROM ( SELECT employeesforview.id,\n employeesforview.first_name,\n employeesforview.last_name,\n employeesforview.salary\n FROM public.employeesforview\n ORDER BY employeesforview.salary DESC\n FETCH FIRST 2 ROWS WITH TIES) top_employees;" + }, + { + "ObjectName": "schema2.top_employees_view", + "SqlStatement": "CREATE VIEW schema2.top_employees_view AS\n SELECT id,\n first_name,\n last_name,\n salary\n FROM ( SELECT employeesforview.id,\n employeesforview.first_name,\n employeesforview.last_name,\n employeesforview.salary\n FROM schema2.employeesforview\n ORDER BY employeesforview.salary DESC\n FETCH FIRST 2 ROWS WITH TIES) top_employees;" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null }, { - "FeatureName": "Unlogged tables", + "FeatureName": "Security Invoker Views", "Objects": [ { - "ObjectName": "public.tbl_unlogged", - "SqlStatement": "CREATE UNLOGGED TABLE public.tbl_unlogged (\n id integer,\n val text\n);" + "ObjectName": "public.view_explicit_security_invoker", + "SqlStatement": "CREATE VIEW public.view_explicit_security_invoker WITH 
(security_invoker='true') AS\n SELECT employee_id,\n first_name\n FROM public.employees;" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Unique Nulls Not Distinct", + "Objects": [ + { + "ObjectName": "users_unique_nulls_not_distinct_index_email ON public.users_unique_nulls_not_distinct_index", + "SqlStatement": "CREATE UNIQUE INDEX users_unique_nulls_not_distinct_index_email ON public.users_unique_nulls_not_distinct_index USING btree (email) NULLS NOT DISTINCT;" + }, + { + "ObjectName": "users_unique_nulls_not_distinct_index_email ON schema2.users_unique_nulls_not_distinct_index", + "SqlStatement": "CREATE UNIQUE INDEX users_unique_nulls_not_distinct_index_email ON schema2.users_unique_nulls_not_distinct_index USING btree (email) NULLS NOT DISTINCT;" + }, + { + "ObjectName": "public.sales_unique_nulls_not_distinct", + "SqlStatement": "ALTER TABLE ONLY public.sales_unique_nulls_not_distinct\n ADD CONSTRAINT sales_unique_nulls_not_distin_store_id_product_id_sale_date_key UNIQUE NULLS NOT DISTINCT (store_id, product_id, sale_date);" + }, + { + "ObjectName": "public.sales_unique_nulls_not_distinct_alter", + "SqlStatement": "ALTER TABLE ONLY public.sales_unique_nulls_not_distinct_alter\n ADD CONSTRAINT sales_unique_nulls_not_distinct_alter_unique UNIQUE NULLS NOT DISTINCT (store_id, product_id, sale_date);" + }, + { + "ObjectName": "public.users_unique_nulls_not_distinct", + "SqlStatement": "ALTER TABLE ONLY public.users_unique_nulls_not_distinct\n ADD CONSTRAINT users_unique_nulls_not_distinct_email_key UNIQUE NULLS NOT DISTINCT (email);" + }, + { + "ObjectName": "schema2.sales_unique_nulls_not_distinct", + "SqlStatement": "ALTER TABLE ONLY schema2.sales_unique_nulls_not_distinct\n ADD CONSTRAINT sales_unique_nulls_not_distin_store_id_product_id_sale_date_key UNIQUE NULLS NOT DISTINCT (store_id, product_id, sale_date);" }, 
{ - "ObjectName": "schema2.tbl_unlogged", - "SqlStatement": "CREATE UNLOGGED TABLE schema2.tbl_unlogged (\n id integer,\n val text\n);" + "ObjectName": "schema2.sales_unique_nulls_not_distinct_alter", + "SqlStatement": "ALTER TABLE ONLY schema2.sales_unique_nulls_not_distinct_alter\n ADD CONSTRAINT sales_unique_nulls_not_distinct_alter_unique UNIQUE NULLS NOT DISTINCT (store_id, product_id, sale_date);" + }, + { + "ObjectName": "schema2.users_unique_nulls_not_distinct", + "SqlStatement": "ALTER TABLE ONLY schema2.users_unique_nulls_not_distinct\n ADD CONSTRAINT users_unique_nulls_not_distinct_email_key UNIQUE NULLS NOT DISTINCT (email);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unlogged-table-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ { - "SchemaName": "public", - "ObjectName": "Recipients", - "RowCount": 1, - "ColumnCount": 4, + "SchemaName": "schema2", + "ObjectName": "sales_unique_nulls_not_distinct_alter_unique", + "RowCount": null, + "ColumnCount": 3, "Reads": 0, - "Writes": 1, + "Writes": 0, "ReadsPerSecond": 0, "WritesPerSecond": 0, - "IsIndex": false, + "IsIndex": true, "ObjectType": "", - "ParentTableName": null, + "ParentTableName": "schema2.sales_unique_nulls_not_distinct_alter", "SizeInBytes": 8192 }, { - "SchemaName": "public", - "ObjectName": "orders", - "RowCount": 3, + "SchemaName": "schema2", + "ObjectName": "sales_unique_nulls_not_distin_store_id_product_id_sale_date_key", + "RowCount": null, "ColumnCount": 3, "Reads": 0, - "Writes": 3, + "Writes": 0, "ReadsPerSecond": 0, "WritesPerSecond": 0, - "IsIndex": false, + "IsIndex": true, "ObjectType": "", - "ParentTableName": null, + "ParentTableName": 
"schema2.sales_unique_nulls_not_distinct", "SizeInBytes": 8192 }, { - "SchemaName": "public", - "ObjectName": "c", - "RowCount": 12, - "ColumnCount": 3, + "SchemaName": "schema2", + "ObjectName": "users_unique_nulls_not_distinct_email_key", + "RowCount": null, + "ColumnCount": 1, "Reads": 0, - "Writes": 12, + "Writes": 0, "ReadsPerSecond": 0, "WritesPerSecond": 0, - "IsIndex": false, + "IsIndex": true, "ObjectType": "", - "ParentTableName": null, + "ParentTableName": "schema2.users_unique_nulls_not_distinct", "SizeInBytes": 8192 }, { - "SchemaName": "public", - "ObjectName": "Case_Sensitive_Columns", - "RowCount": 5, - "ColumnCount": 6, - "Reads": 5, - "Writes": 5, + "SchemaName": "schema2", + "ObjectName": "users_unique_nulls_distinct_email_key", + "RowCount": null, + "ColumnCount": 1, + "Reads": 0, + "Writes": 0, "ReadsPerSecond": 0, "WritesPerSecond": 0, - "IsIndex": false, + "IsIndex": true, "ObjectType": "", - "ParentTableName": null, + "ParentTableName": "schema2.users_unique_nulls_distinct", "SizeInBytes": 8192 }, { "SchemaName": "public", - "ObjectName": "inet_type", - "RowCount": 0, - "ColumnCount": 2, + "ObjectName": "sales_unique_nulls_not_distinct_alter_unique", + "RowCount": null, + "ColumnCount": 3, "Reads": 0, "Writes": 0, "ReadsPerSecond": 0, "WritesPerSecond": 0, - "IsIndex": false, + "IsIndex": true, "ObjectType": "", - "ParentTableName": null, - "SizeInBytes": 0 + "ParentTableName": "public.sales_unique_nulls_not_distinct_alter", + "SizeInBytes": 8192 }, { "SchemaName": "public", - "ObjectName": "WITH", - "RowCount": 3, - "ColumnCount": 2, + "ObjectName": "sales_unique_nulls_not_distin_store_id_product_id_sale_date_key", + "RowCount": null, + "ColumnCount": 3, "Reads": 0, - "Writes": 3, + "Writes": 0, "ReadsPerSecond": 0, "WritesPerSecond": 0, - "IsIndex": false, + "IsIndex": true, "ObjectType": "", - "ParentTableName": null, + "ParentTableName": "public.sales_unique_nulls_not_distinct", "SizeInBytes": 8192 }, { "SchemaName": "public", - 
"ObjectName": "tt", - "RowCount": 8, + "ObjectName": "users_unique_nulls_not_distinct_email_key", + "RowCount": null, "ColumnCount": 1, - "Reads": 4, - "Writes": 8, + "Reads": 0, + "Writes": 0, "ReadsPerSecond": 0, "WritesPerSecond": 0, - "IsIndex": false, + "IsIndex": true, "ObjectType": "", - "ParentTableName": null, + "ParentTableName": "public.users_unique_nulls_not_distinct", + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "users_unique_nulls_distinct_email_key", + "RowCount": null, + "ColumnCount": 1, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": true, + "ObjectType": "", + "ParentTableName": "public.users_unique_nulls_distinct", + "SizeInBytes": 8192 + }, + { + "SchemaName": "schema2", + "ObjectName": "users_unique_nulls_not_distinct", + "RowCount": 0, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "users_unique_nulls_distinct", + "RowCount": 0, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "sales_unique_nulls_not_distinct", + "RowCount": 0, + "ColumnCount": 3, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "sales_unique_nulls_not_distinct_alter", + "RowCount": 0, + "ColumnCount": 3, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "users_unique_nulls_not_distinct", + "RowCount": 0, + "ColumnCount": 
2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "users_unique_nulls_distinct", + "RowCount": 0, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "sales_unique_nulls_not_distinct_alter", + "RowCount": 0, + "ColumnCount": 3, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "sales_unique_nulls_not_distinct", + "RowCount": 0, + "ColumnCount": 3, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "Recipients", + "RowCount": 1, + "ColumnCount": 4, + "Reads": 0, + "Writes": 1, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "orders", + "RowCount": 3, + "ColumnCount": 3, + "Reads": 0, + "Writes": 3, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "c", + "RowCount": 12, + "ColumnCount": 3, + "Reads": 0, + "Writes": 12, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "Case_Sensitive_Columns", + "RowCount": 5, + "ColumnCount": 6, + "Reads": 5, + "Writes": 5, + "ReadsPerSecond": 0, + "WritesPerSecond": 
0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "inet_type", + "RowCount": 0, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "WITH", + "RowCount": 3, + "ColumnCount": 2, + "Reads": 0, + "Writes": 3, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "tt", + "RowCount": 8, + "ColumnCount": 1, + "Reads": 4, + "Writes": 8, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, "SizeInBytes": 8192 }, { @@ -605,7 +1107,7 @@ "SchemaName": "public", "ObjectName": "combined_tbl", "RowCount": 0, - "ColumnCount": 9, + "ColumnCount": 12, "Reads": 0, "Writes": 0, "ReadsPerSecond": 0, @@ -629,6 +1131,34 @@ "ParentTableName": null, "SizeInBytes": 0 }, + { + "SchemaName": "public", + "ObjectName": "orders_lateral", + "RowCount": 0, + "ColumnCount": 3, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "library_nested", + "RowCount": 1, + "ColumnCount": 2, + "Reads": 1, + "Writes": 1, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, { "SchemaName": "public", "ObjectName": "orders2", @@ -727,6 +1257,20 @@ "ParentTableName": null, "SizeInBytes": 8192 }, + { + "SchemaName": "public", + "ObjectName": "employees", + "RowCount": 10, + "ColumnCount": 4, + "Reads": 0, + "Writes": 10, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + 
"ParentTableName": null, + "SizeInBytes": 8192 + }, { "SchemaName": "public", "ObjectName": "employees2", @@ -773,7 +1317,7 @@ "SchemaName": "public", "ObjectName": "test_jsonb", "RowCount": 0, - "ColumnCount": 3, + "ColumnCount": 4, "Reads": 0, "Writes": 0, "ReadsPerSecond": 0, @@ -1008,9 +1552,23 @@ "SizeInBytes": 0 }, { - "SchemaName": "schema2", - "ObjectName": "tbl_unlogged", - "RowCount": 0, + "SchemaName": "public", + "ObjectName": "ordersentry", + "RowCount": 6, + "ColumnCount": 7, + "Reads": 0, + "Writes": 6, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "schema2", + "ObjectName": "tbl_unlogged", + "RowCount": 0, "ColumnCount": 2, "Reads": 0, "Writes": 0, @@ -1623,6 +2181,34 @@ "ParentTableName": "public.citext_type", "SizeInBytes": 8192 }, + { + "SchemaName": "public", + "ObjectName": "idx8", + "RowCount": null, + "ColumnCount": 1, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": true, + "ObjectType": "", + "ParentTableName": "public.combined_tbl", + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "idx9", + "RowCount": null, + "ColumnCount": 1, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": true, + "ObjectType": "", + "ParentTableName": "public.combined_tbl", + "SizeInBytes": 8192 + }, { "SchemaName": "public", "ObjectName": "idx7", @@ -1665,6 +2251,34 @@ "ParentTableName": "public.combined_tbl", "SizeInBytes": 8192 }, + { + "SchemaName": "public", + "ObjectName": "combined_tbl_bittv_key", + "RowCount": null, + "ColumnCount": 1, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": true, + "ObjectType": "", + "ParentTableName": "public.combined_tbl", + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "uk", + "RowCount": null, + "ColumnCount": 1, + "Reads": 0, + "Writes": 0, + 
"ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": true, + "ObjectType": "", + "ParentTableName": "public.combined_tbl", + "SizeInBytes": 8192 + }, { "SchemaName": "public", "ObjectName": "idx4", @@ -1776,30 +2390,293 @@ "ObjectType": "", "ParentTableName": "schema2.mixed_data_types_table1", "SizeInBytes": 8192 - } - ], - "Notes": null, - "MigrationCaveats": [ + }, { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [ - { - "ObjectName": "public.sales_region", - "SqlStatement": "ALTER TABLE ONLY public.sales_region\n ADD CONSTRAINT sales_region_pkey PRIMARY KEY (id, region);" - }, - { - "ObjectName": "schema2.sales_region", - "SqlStatement": "ALTER TABLE ONLY schema2.sales_region\n ADD CONSTRAINT sales_region_pkey PRIMARY KEY (id, region);" - } - ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#adding-primary-key-to-a-partitioned-table-results-in-an-error", - "FeatureDescription": "After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." + "SchemaName": "public", + "ObjectName": "employeesforview", + "RowCount": 0, + "ColumnCount": 4, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 }, { - "FeatureName": "Foreign tables", - "Objects": [], - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." 
+ "SchemaName": "public", + "ObjectName": "employeescopyfromwhere", + "RowCount": 2, + "ColumnCount": 3, + "Reads": 0, + "Writes": 2, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "bigint_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "date_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "int_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "numeric_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "timestamp_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 }, + { + "SchemaName": "public", + "ObjectName": "timestamptz_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + 
"ObjectName": "employeesforview", + "RowCount": 0, + "ColumnCount": 4, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "bigint_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "date_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "int_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "numeric_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "timestamp_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "timestamptz_multirange_table", + "RowCount": null, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "public", + "ObjectName": "employeescopyonerror", + 
"RowCount": 3, + "ColumnCount": 3, + "Reads": 0, + "Writes": 3, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "users_unique_nulls_not_distinct_index_email", + "RowCount": null, + "ColumnCount": 1, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": true, + "ObjectType": "", + "ParentTableName": "public.users_unique_nulls_not_distinct_index", + "SizeInBytes": 8192 + }, + { + "SchemaName": "public", + "ObjectName": "users_unique_nulls_not_distinct_index", + "RowCount": 0, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "users_unique_nulls_not_distinct_index", + "RowCount": 0, + "ColumnCount": 2, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": false, + "ObjectType": "", + "ParentTableName": null, + "SizeInBytes": 0 + }, + { + "SchemaName": "schema2", + "ObjectName": "users_unique_nulls_not_distinct_index_email", + "RowCount": null, + "ColumnCount": 1, + "Reads": 0, + "Writes": 0, + "ReadsPerSecond": 0, + "WritesPerSecond": 0, + "IsIndex": true, + "ObjectType": "", + "ParentTableName": "schema2.users_unique_nulls_not_distinct_index", + "SizeInBytes": 8192 + } + + ], + "Notes": [ + "There are some Unlogged tables in the schema. They will be created as regular LOGGED tables in YugabyteDB as unlogged tables are not supported." + ], + "MigrationCaveats": [ { "FeatureName": "Policies", "Objects": [ @@ -1813,8 +2690,9 @@ } ], "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", - "FeatureDescription": "There are some policies that are created for certain users/roles. 
During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema." - }, + "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema.", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Unsupported Data Types for Live Migration", "Objects": [ @@ -1860,8 +2738,9 @@ } ], "DocsLink":"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", - "FeatureDescription": "There are some data types in the schema that are not supported by live migration of data. These columns will be excluded when exporting and importing data in live migration workflows." - }, + "FeatureDescription": "There are some data types in the schema that are not supported by live migration of data. These columns will be excluded when exporting and importing data in live migration workflows.", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Unsupported Data Types for Live Migration with Fall-forward/Fallback", "Objects": [ @@ -1903,59 +2782,154 @@ } ], "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", - "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. These columns will be excluded when exporting and importing data in live migration workflows." + "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. 
These columns will be excluded when exporting and importing data in live migration workflows.", + "MinimumVersionsFixedIn": null } ], "UnsupportedQueryConstructs": [ { "ConstructTypeName": "Advisory Locks", "Query": "SELECT pg_advisory_xact_lock($1,$2)", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, { "ConstructTypeName": "Advisory Locks", "Query": "SELECT pg_advisory_unlock($1,$2)", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, { "ConstructTypeName": "Advisory Locks", "Query": "SELECT pg_advisory_unlock_all()", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, { "ConstructTypeName": "Advisory Locks", "Query": "SELECT pg_advisory_lock($1,$2)", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + "MinimumVersionsFixedIn": null + }, { "ConstructTypeName": "System Columns", "Query": "SELECT ctid, tableoid, xmin, xmax, cmin, cmax\nFROM employees2", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#system-columns-is-not-yet-supported" - }, + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#system-columns-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, { "ConstructTypeName": "XML Functions", "Query": "SELECT xmlparse(document $1) as xmldata", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, { "ConstructTypeName": "XML Functions", "Query": "SELECT table_to_xml($1, $2, $3, $4)", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, { "ConstructTypeName": "XML Functions", "Query": "SELECT xmlforest(first_name AS element1, last_name AS element2) FROM employees2", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, { "ConstructTypeName": "XML Functions", "Query": "SELECT xmlelement(name root, xmlelement(name child, $1))", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, { "ConstructTypeName": "XML Functions", "Query": "SELECT xml_is_well_formed($1)", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "XML Functions", + "Query": "SELECT\n s.section_name,\n b.title,\n b.author\nFROM\n library_nested l,\n XMLTABLE(\n $1\n PASSING l.lib_data\n COLUMNS\n section_name TEXT PATH $2,\n books XML PATH $3\n ) AS s,\n XMLTABLE(\n $4\n PASSING s.books\n COLUMNS\n title TEXT PATH $5,\n author TEXT PATH $6\n) AS b", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "XML Functions", + "Query": "SELECT\n o.order_id,\n items.product,\n items.quantity::INT\nFROM\n orders_lateral o\n CROSS JOIN LATERAL XMLTABLE(\n $1\n PASSING o.order_details\n COLUMNS\n product TEXT PATH $2,\n quantity TEXT PATH $3\n) AS items", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "XML Functions", + "Query": "SELECT *\nFROM xmltable(\n $1\n PASSING $2\n COLUMNS \n name TEXT PATH $3\n)", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "Large Object Functions", + "Query": "SELECT lo_create($1)", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#large-objects-and-its-functions-are-currently-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "COPY FROM ... 
WHERE", + "Query": "COPY employeesCopyFromWhere (id, name, age)\nFROM STDIN WITH (FORMAT csv)\nWHERE age \u003e 30", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null + }, + { + "ConstructTypeName": "COPY ... ON_ERROR", + "Query": "COPY employeesCopyOnError (id, name, age)\nFROM STDIN WITH (FORMAT csv, ON_ERROR IGNORE )", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", + "MinimumVersionsFixedIn": null } + ], + "UnsupportedPlPgSqlObjects": [ + { + "FeatureName": "Large Object Functions", + "Objects": [ + { + "ObjectType": "FUNCTION", + "ObjectName": "public.manage_large_object", + "SqlStatement": "SELECT lo_unlink(loid);" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#large-objects-and-its-functions-are-currently-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Referenced type declaration of variables", + "Objects": [ + { + "ObjectType": "FUNCTION", + "ObjectName": "public.process_combined_tbl", + "SqlStatement": "public.combined_tbl.maddr%TYPE" + }, + { + "ObjectType": "PROCEDURE", + "ObjectName": "public.update_combined_tbl_data", + "SqlStatement": "public.combined_tbl.maddr%TYPE" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "FeatureName": "Advisory Locks", + "Objects": [ + { + "ObjectType": "FUNCTION", + "ObjectName": "public.process_order", + "SqlStatement": "SELECT pg_advisory_unlock(orderid);" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "schema2.process_order", + "SqlStatement": "SELECT pg_advisory_unlock(orderid);" + } + ], + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", + 
"MinimumVersionsFixedIn": null + } ] -} \ No newline at end of file +} diff --git a/migtests/tests/pg/assessment-report-test/init-db b/migtests/tests/pg/assessment-report-test/init-db index dc1d41a465..5160094487 100755 --- a/migtests/tests/pg/assessment-report-test/init-db +++ b/migtests/tests/pg/assessment-report-test/init-db @@ -24,7 +24,8 @@ cat < "$TEMP_SQL" \i ${TEST_DIR}/../misc-objects-2/pg_misc_objects2.sql; \i ${TEST_DIR}/../views-and-rules/pg_views_and_rules_automation.sql; \i pg_assessment_report.sql; -\i unsupported_query_constructs.sql +\i unsupported_query_constructs.sql; + CREATE SCHEMA IF NOT EXISTS schema2; SET SEARCH_PATH TO schema2; \i ${TEST_DIR}/../misc-objects-1/schema.sql; diff --git a/migtests/tests/pg/assessment-report-test/pg_assessment_report.sql b/migtests/tests/pg/assessment-report-test/pg_assessment_report.sql index da73b1ceaa..c0ffcb0d5d 100644 --- a/migtests/tests/pg/assessment-report-test/pg_assessment_report.sql +++ b/migtests/tests/pg/assessment-report-test/pg_assessment_report.sql @@ -24,6 +24,36 @@ CREATE TABLE Mixed_Data_Types_Table2 ( path_data PATH ); +CREATE TABLE int_multirange_table ( + id SERIAL PRIMARY KEY, + value_ranges int4multirange +); + +CREATE TABLE bigint_multirange_table ( + id SERIAL PRIMARY KEY, + value_ranges int8multirange +); + +CREATE TABLE numeric_multirange_table ( + id SERIAL PRIMARY KEY, + price_ranges nummultirange +); + +CREATE TABLE timestamp_multirange_table ( + id SERIAL PRIMARY KEY, + event_times tsmultirange +); + +CREATE TABLE timestamptz_multirange_table ( + id SERIAL PRIMARY KEY, + global_event_times tstzmultirange +); + +CREATE TABLE date_multirange_table ( + id SERIAL PRIMARY KEY, + project_dates datemultirange +); + -- GIST Index on point_data column CREATE INDEX idx_point_data ON Mixed_Data_Types_Table1 USING GIST (point_data); @@ -114,7 +144,9 @@ WITH CHECK OPTION; CREATE TABLE public.test_jsonb ( id integer, data jsonb, - data2 text + data2 text, + region text, + FOREIGN KEY (id, 
region) REFERENCES sales_region(id, region) ); CREATE TABLE public.inet_type ( @@ -173,6 +205,7 @@ CREATE TYPE public.address_type AS ( zip_code VARCHAR(10) ); +CREATE EXTENSION lo; --other misc types create table public.combined_tbl ( id int, @@ -180,12 +213,19 @@ create table public.combined_tbl ( maddr macaddr, maddr8 macaddr8, lsn pg_lsn, + inds3 INTERVAL DAY TO SECOND(3), + d daterange, bitt bit (13), - bittv bit varying(15), + bittv bit varying(15) UNIQUE, address address_type, - arr_enum enum_kind[] + raster lo, + arr_enum enum_kind[], + PRIMARY KEY (id, arr_enum) ); +ALTER TABLE public.combined_tbl + ADD CONSTRAINT uk UNIQUE(lsn); + CREATE index idx1 on public.combined_tbl (c); CREATE index idx2 on public.combined_tbl (maddr); @@ -200,6 +240,10 @@ CREATE INDEX idx6 on public.combined_tbl (bittv); CREATE INDEX idx7 on public.combined_tbl (address); +CREATE INDEX idx8 on public.combined_tbl (d); + +CREATE INDEX idx9 on public.combined_tbl (inds3); + CREATE UNLOGGED TABLE tbl_unlogged (id int, val text); CREATE OR REPLACE FUNCTION public.check_sales_region() @@ -222,3 +266,212 @@ CREATE TRIGGER before_sales_region_insert_update BEFORE INSERT OR UPDATE ON public.sales_region FOR EACH ROW EXECUTE FUNCTION public.check_sales_region(); + + + CREATE TABLE public.ordersentry ( + order_id SERIAL PRIMARY KEY, + customer_name TEXT NOT NULL, + product_name TEXT NOT NULL, + quantity INT NOT NULL, + price NUMERIC(10, 2) NOT NULL, + processed_at timestamp, + r INT DEFAULT regexp_count('This is an example. Another example. 
Example is a common word.', 'example') -- regex functions in default +); + +INSERT INTO public.ordersentry (customer_name, product_name, quantity, price) +VALUES + ('Alice', 'Laptop', 1, 1200.00), + ('Bob', 'Smartphone', 2, 800.00), + ('Charlie', 'Tablet', 1, 500.00); + +CREATE VIEW public.ordersentry_view AS +SELECT + order_id, + customer_name, + product_name, + quantity, + price, + xmlelement( + name "OrderDetails", + xmlelement(name "Customer", customer_name), + xmlelement(name "Product", product_name), + xmlelement(name "Quantity", quantity), + xmlelement(name "TotalPrice", price * quantity) + ) AS order_xml, + xmlconcat( + xmlelement(name "Customer", customer_name), + xmlelement(name "Product", product_name) + ) AS summary_xml, + pg_try_advisory_lock(hashtext(customer_name || product_name)) AS lock_acquired, + ctid AS row_ctid, + xmin AS transaction_id +FROM + ordersentry; + +CREATE OR REPLACE FUNCTION process_order(orderid INT) RETURNS VOID AS $$ +DECLARE + lock_acquired BOOLEAN; +BEGIN + lock_acquired := pg_try_advisory_lock(orderid); -- not able to report this as it is an assignment statement TODO: fix when support this + + IF NOT lock_acquired THEN + RAISE EXCEPTION 'Order % already being processed by another session', orderid; + END IF; + + UPDATE orders + SET processed_at = NOW() + WHERE orders.order_id = orderid; + + RAISE NOTICE 'Order % processed successfully', orderid; + + PERFORM pg_advisory_unlock(orderid); +END; +$$ LANGUAGE plpgsql; + +select process_order(1); + +-- In PG migration from pg_dump the function parameters and return will never have the %TYPE syntax, instead they have the actual type in the DDLs +-- e.g. 
for the below function this will be the export one `CREATE FUNCTION public.process_combined_tbl(p_id integer, p_c cidr, p_bitt bit, p_inds3 interval) RETURNS macaddr` +CREATE OR REPLACE FUNCTION public.process_combined_tbl( + p_id public.combined_tbl.id%TYPE, + p_c public.combined_tbl.c%TYPE, + p_bitt public.combined_tbl.bitt%TYPE, + p_inds3 public.combined_tbl.inds3%TYPE +) +RETURNS public.combined_tbl.maddr%TYPE AS +$$ +DECLARE + v_maddr public.combined_tbl.maddr%TYPE; +BEGIN + -- Example logic: Assigning local variable using passed-in parameter + v_maddr := p_c::text; -- Example conversion (cidr to macaddr), just for illustration + + -- Processing the passed parameters + RAISE NOTICE 'Processing: ID = %, CIDR = %, BIT = %, Interval = %, MAC = %', + p_id, p_c, p_bitt, p_inds3, v_maddr; + + -- Returning a value of the macaddr type (this could be more meaningful logic) + RETURN v_maddr; -- Returning a macaddr value +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE PROCEDURE public.update_combined_tbl_data( + p_id public.combined_tbl.id%TYPE, + p_c public.combined_tbl.c%TYPE, + p_bitt public.combined_tbl.bitt%TYPE, + p_d public.combined_tbl.d%TYPE +) +AS +$$ +DECLARE + v_new_mac public.combined_tbl.maddr%TYPE; +BEGIN + -- Example: Using a local variable to store a macaddr value (for illustration) + v_new_mac := '00:14:22:01:23:45'::macaddr; + + -- Updating the table with provided parameters + UPDATE public.combined_tbl + SET + c = p_c, -- Updating cidr type column + bitt = p_bitt, -- Updating bit column + d = p_d, -- Updating daterange column + maddr = v_new_mac -- Using the local macaddr variable in update + WHERE id = p_id; + + RAISE NOTICE 'Updated record with ID: %, CIDR: %, BIT: %, Date range: %', + p_id, p_c, p_bitt, p_d; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON public.combined_tbl + FOR EACH ROW EXECUTE FUNCTION lo_manage(raster); + +CREATE OR REPLACE FUNCTION public.manage_large_object(loid OID) RETURNS VOID AS $$ 
+BEGIN + IF loid IS NOT NULL THEN + -- Unlink the large object to free up storage + PERFORM lo_unlink(loid); + END IF; +END; +$$ LANGUAGE plpgsql; + +-- for FETCH .. WITH TIES +CREATE TABLE employeesForView ( + id SERIAL PRIMARY KEY, + first_name VARCHAR(50) NOT NULL, + last_name VARCHAR(50) NOT NULL, + salary NUMERIC(10, 2) NOT NULL +); + +CREATE VIEW top_employees_view AS SELECT * FROM ( + SELECT * FROM employeesForView + ORDER BY salary DESC + FETCH FIRST 2 ROWS WITH TIES + ) AS top_employees; + +-- SECURITY INVOKER VIEW +CREATE TABLE public.employees ( + employee_id SERIAL PRIMARY KEY, + first_name VARCHAR(100), + last_name VARCHAR(100), + department VARCHAR(50) +); + +INSERT INTO public.employees (first_name, last_name, department) +VALUES + ('Alice', 'Smith', 'HR'), + ('Bob', 'Jones', 'Finance'), + ('Charlie', 'Brown', 'IT'), + ('Diana', 'Prince', 'HR'), + ('Ethan', 'Hunt', 'Security'); + +CREATE VIEW public.view_explicit_security_invoker +WITH (security_invoker = true) AS + SELECT employee_id, first_name + FROM public.employees; + +CREATE COLLATION schema2.ignore_accents (provider = icu, locale = 'und-u-ks-level1-kc-true', deterministic = false); + + CREATE COLLATION public.numeric (provider = icu, locale = 'en@colNumeric=yes'); +-- Testing tables with unique nulls not distinct constraints + +-- Control case +CREATE TABLE users_unique_nulls_distinct ( + id INTEGER PRIMARY KEY, + email TEXT, + UNIQUE (email) +); + +CREATE TABLE users_unique_nulls_not_distinct ( + id INTEGER PRIMARY KEY, + email TEXT, + UNIQUE NULLS NOT DISTINCT (email) +); + +CREATE TABLE sales_unique_nulls_not_distinct ( + store_id INT, + product_id INT, + sale_date DATE, + UNIQUE NULLS NOT DISTINCT (store_id, product_id, sale_date) +); + +CREATE TABLE sales_unique_nulls_not_distinct_alter ( + store_id INT, + product_id INT, + sale_date DATE +); + +ALTER TABLE sales_unique_nulls_not_distinct_alter + ADD CONSTRAINT sales_unique_nulls_not_distinct_alter_unique UNIQUE NULLS NOT DISTINCT 
(store_id, product_id, sale_date); + +-- Create a unique index on a column with NULLs with the NULLS NOT DISTINCT option +CREATE TABLE users_unique_nulls_not_distinct_index ( + id INTEGER PRIMARY KEY, + email TEXT +); + +CREATE UNIQUE INDEX users_unique_nulls_not_distinct_index_email + ON users_unique_nulls_not_distinct_index (email) + NULLS NOT DISTINCT; + + diff --git a/migtests/tests/pg/assessment-report-test/unsupported_query_constructs.sql b/migtests/tests/pg/assessment-report-test/unsupported_query_constructs.sql index 90d0e03d4e..0d1a88f6e6 100644 --- a/migtests/tests/pg/assessment-report-test/unsupported_query_constructs.sql +++ b/migtests/tests/pg/assessment-report-test/unsupported_query_constructs.sql @@ -25,19 +25,150 @@ SELECT xmlelement(name root, xmlelement(name child, 'value')); SELECT xml_is_well_formed('value'); --- Not Reported Currently +SELECT * +FROM xmltable( + '/employees/employee' + PASSING 'John' + COLUMNS + name TEXT PATH 'name' +); --- SELECT * --- FROM xmltable( --- '/employees/employee' --- PASSING 'John' --- COLUMNS --- name TEXT PATH 'name' --- ); -- Advisory Locks SELECT pg_advisory_lock(1,2); SELECT pg_advisory_unlock(1,2); SELECT pg_advisory_xact_lock(1,2); -SELECT pg_advisory_unlock_all(); \ No newline at end of file +SELECT pg_advisory_unlock_all(); + +-- Adding few XMLTABLE() examples +-- Case 1 +CREATE TABLE library_nested ( + lib_id INT, + lib_data XML +); + +INSERT INTO library_nested VALUES +(1, ' + +
+<library>
+  <section name="Fiction">
+    <book>
+      <title>The Great Gatsby</title>
+      <author>F. Scott Fitzgerald</author>
+    </book>
+    <book>
+      <title>1984</title>
+      <author>George Orwell</author>
+    </book>
+  </section>
+  <section name="Science">
+    <book>
+      <title>A Brief History of Time</title>
+      <author>Stephen Hawking</author>
+    </book>
+  </section>
+</library>
+');
+
+-- Query with nested XMLTABLE() calls
+SELECT
+    s.section_name,
+    b.title,
+    b.author
+FROM
+    library_nested l,
+    XMLTABLE(
+        '/library/section'
+        PASSING l.lib_data
+        COLUMNS
+            section_name TEXT PATH '@name',
+            books XML PATH '.'
+    ) AS s,
+    XMLTABLE(
+        '/section/book'
+        PASSING s.books
+        COLUMNS
+            title TEXT PATH 'title',
+            author TEXT PATH 'author'
+) AS b;
+
+
+-- Case 2
+CREATE TABLE orders_lateral (
+    order_id INT,
+    customer_id INT,
+    order_details XML
+);
+
+INSERT INTO orders_lateral (order_id, customer_id, order_details) VALUES
+(1, 1, '<order>
+  <item>
+    <product>Keyboard</product>
+    <quantity>2</quantity>
+  </item>
+  <item>
+    <product>Mouse</product>
+    <quantity>1</quantity>
+  </item>
+</order>'),
+(2, 2, '<order>
+  <item>
+    <product>Monitor</product>
+    <quantity>1</quantity>
+  </item>
+</order>');
+
+-- Query using XMLTABLE with LATERAL join
+SELECT
+    o.order_id,
+    items.product,
+    items.quantity::INT
+FROM
+    orders_lateral o
+    CROSS JOIN LATERAL XMLTABLE(
+        '/order/item'
+        PASSING o.order_details
+        COLUMNS
+            product TEXT PATH 'product',
+            quantity TEXT PATH 'quantity'
+) AS items;
+
+
+SELECT lo_create('32142');
+
+-- Unsupported COPY constructs
+
+CREATE TABLE IF NOT EXISTS employeesCopyFromWhere (
+    id INT PRIMARY KEY,
+    name TEXT NOT NULL,
+    age INT NOT NULL
+);
+
+
+-- COPY FROM with WHERE clause
+COPY employeesCopyFromWhere (id, name, age)
+FROM STDIN WITH (FORMAT csv)
+WHERE age > 30;
+1,John Smith,25
+2,Jane Doe,34
+3,Bob Johnson,31
+\.
+
+CREATE TABLE IF NOT EXISTS employeesCopyOnError (
+    id INT PRIMARY KEY,
+    name TEXT NOT NULL,
+    age INT NOT NULL
+);
+
+-- COPY with ON_ERROR clause
+COPY employeesCopyOnError (id, name, age)
+FROM STDIN WITH (FORMAT csv, ON_ERROR IGNORE );
+4,Adam Smith,22
+5,John Doe,34
+6,Ron Johnson,31
+\.
+ diff --git a/migtests/tests/pg/constraints/pg_constraints_automation.sql b/migtests/tests/pg/constraints/pg_constraints_automation.sql index 4f70492627..d7929263c1 100644 --- a/migtests/tests/pg/constraints/pg_constraints_automation.sql +++ b/migtests/tests/pg/constraints/pg_constraints_automation.sql @@ -39,17 +39,19 @@ drop table if exists check_test; CREATE TABLE check_test ( ID serial primary key, first_name varchar(255) NOT NULL, + middle_name varchar(255) not null, last_name varchar(255), Age int, CHECK (Age>=18) ); -insert into check_test (first_name, last_name, age) values ('Modestine', 'MacMeeking', 20); -insert into check_test (first_name, last_name, age) values ('Genna', 'Kaysor', 50); -insert into check_test (first_name, last_name, age) values ('Tess', 'Wesker', 56); -insert into check_test (first_name, last_name, age) values ('Magnum', 'Danzelman', 89); -insert into check_test (first_name, last_name, age) values ('Mitzi', 'Pidwell', 34); -insert into check_test (first_name, last_name, age) values ('Milzie', 'Rohlfing', 70); - +insert into check_test (first_name, middle_name, last_name, age) values ('Modestine', '', 'MacMeeking', 20); +insert into check_test (first_name, middle_name, last_name, age) values ('Genna', '', 'Kaysor', 50); +insert into check_test (first_name, middle_name, last_name, age) values ('Tess', '', 'Wesker', 56); +insert into check_test (first_name, middle_name, last_name, age) values ('Magnum', '', 'Danzelman', 89); +insert into check_test (first_name, middle_name, last_name, age) values ('Mitzi', '', 'Pidwell', 34); +insert into check_test (first_name, middle_name, last_name, age) values ('Milzie', '', 'Rohlfing', 70); + +ALTER TABLE check_test ADD CONSTRAINT not_valid_cons CHECK(middle_name<>'') NOT VALID; drop table if exists default_test; diff --git a/migtests/tests/pg/constraints/validate b/migtests/tests/pg/constraints/validate index 7199571d27..82d036ded2 100755 --- a/migtests/tests/pg/constraints/validate +++ 
b/migtests/tests/pg/constraints/validate @@ -29,7 +29,11 @@ QUERIES_CHECK = { 'code': "23505" }, 'CHECK_CONDITION': { - 'query': "insert into check_test (id, first_name, last_name, age) values (7, 'Tom', 'Stewan', 15);", + 'query': "insert into check_test (id, first_name, middle_name, last_name, age) values (7, 'Tom', 'abc', 'Stewan', 15);", + 'code': "23514" + }, + 'CHECK_CONDITION_NOT_VALID': { + 'query': "insert into check_test (id, first_name, middle_name, last_name, age) values (7, 'Tom', '', 'Stewan', 52);", 'code': "23514" }, 'FOREIGN_CHECK': { diff --git a/migtests/tests/pg/datatypes/env.sh b/migtests/tests/pg/datatypes/env.sh index 328c0615f3..3606e020cc 100644 --- a/migtests/tests/pg/datatypes/env.sh +++ b/migtests/tests/pg/datatypes/env.sh @@ -1,3 +1,2 @@ export SOURCE_DB_TYPE="postgresql" -export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"pg_datatypes"} export SOURCE_DB_SCHEMA="public" diff --git a/migtests/tests/pg/mgi/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/mgi/expected_files/expectedAssessmentReport.json index 6474fcb025..c5d63e0270 100644 --- a/migtests/tests/pg/mgi/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/mgi/expected_files/expectedAssessmentReport.json @@ -24,7 +24,7 @@ { "ObjectType": "TABLE", "TotalCount": 175, - "InvalidCount": 0, + "InvalidCount": 62, "ObjectNames": "mgd.acc_accession, mgd.acc_accessionmax, mgd.acc_accessionreference, mgd.acc_actualdb, mgd.acc_logicaldb, mgd.acc_mgitype, mgd.all_allele, mgd.all_allele_cellline, mgd.all_cellline, mgd.all_cellline_derivation, mgd.mgi_user, mgd.prb_strain, mgd.voc_term, mgd.mgi_organism, mgd.mgi_relationship, mgd.mrk_marker, mgd.all_allele_mutation, mgd.voc_annot, mgd.bib_citation_cache, mgd.gxd_allelepair, mgd.voc_annottype, mgd.all_cre_cache, mgd.all_knockout_cache, mgd.all_label, mgd.gxd_allelegenotype, mgd.mgi_reference_assoc, mgd.all_variant, mgd.all_variant_sequence, mgd.bib_refs, mgd.gxd_expression, mgd.gxd_index, mgd.img_image, 
mgd.mgi_refassoctype, mgd.mgi_synonym, mgd.mgi_synonymtype, mgd.mld_expts, mgd.mld_notes, mgd.mrk_do_cache, mgd.mrk_reference, mgd.mrk_strainmarker, mgd.prb_reference, mgd.prb_source, mgd.voc_evidence, mgd.bib_books, mgd.bib_workflow_status, mgd.bib_notes, mgd.gxd_assay, mgd.gxd_specimen, mgd.bib_workflow_data, mgd.bib_workflow_relevance, mgd.bib_workflow_tag, mgd.crs_cross, mgd.crs_matrix, mgd.crs_progeny, mgd.crs_references, mgd.crs_typings, mgd.dag_closure, mgd.dag_dag, mgd.dag_edge, mgd.dag_label, mgd.dag_node, mgd.voc_vocabdag, mgd.go_tracking, mgd.gxd_antibody, mgd.gxd_antigen, mgd.gxd_antibodyalias, mgd.gxd_antibodymarker, mgd.gxd_antibodyprep, mgd.gxd_gellane, mgd.gxd_insituresult, mgd.gxd_insituresultimage, mgd.gxd_assaytype, mgd.gxd_assaynote, mgd.gxd_gelband, mgd.gxd_gelrow, mgd.gxd_genotype, mgd.gxd_gellanestructure, mgd.gxd_theilerstage, mgd.voc_annotheader, mgd.gxd_htexperiment, mgd.gxd_htexperimentvariable, mgd.gxd_htrawsample, mgd.gxd_htsample, mgd.gxd_htsample_rnaseq, mgd.gxd_htsample_rnaseqcombined, mgd.gxd_htsample_rnaseqset, mgd.gxd_htsample_rnaseqset_cache, mgd.gxd_htsample_rnaseqsetmember, mgd.gxd_index_stages, mgd.gxd_isresultcelltype, mgd.img_imagepane, mgd.gxd_isresultstructure, mgd.gxd_probeprep, mgd.prb_probe, mgd.img_imagepane_assoc, mgd.mgi_note, mgd.map_coord_collection, mgd.map_coord_feature, mgd.map_coordinate, mgd.mrk_chromosome, mgd.mgi_dbinfo, mgd.mgi_keyvalue, mgd.mgi_notetype, mgd.mgi_organism_mgitype, mgd.mgi_property, mgd.mgi_propertytype, mgd.mgi_relationship_category, mgd.mgi_relationship_property, mgd.mgi_set, mgd.mgi_setmember, mgd.mgi_setmember_emapa, mgd.mgi_translation, mgd.mgi_translationtype, mgd.voc_vocab, mgd.mld_assay_types, mgd.mld_concordance, mgd.mld_contig, mgd.mld_contigprobe, mgd.mld_expt_marker, mgd.mld_expt_notes, mgd.mld_fish, mgd.mld_fish_region, mgd.mld_hit, mgd.mld_hybrid, mgd.mld_insitu, mgd.mld_isregion, mgd.mld_matrix, mgd.mld_mc2point, mgd.mld_mcdatalist, mgd.mld_ri, mgd.mld_ri2point, 
mgd.mld_ridata, mgd.mld_statistics, mgd.mrk_types, mgd.mrk_biotypemapping, mgd.mrk_cluster, mgd.mrk_clustermember, mgd.mrk_current, mgd.mrk_history, mgd.mrk_label, mgd.mrk_location_cache, mgd.mrk_status, mgd.mrk_mcv_cache, mgd.mrk_mcv_count_cache, mgd.mrk_notes, mgd.prb_alias, mgd.prb_allele, mgd.prb_allele_strain, mgd.prb_marker, mgd.prb_notes, mgd.prb_ref_notes, mgd.prb_rflv, mgd.prb_tissue, mgd.prb_strain_genotype, mgd.prb_strain_marker, mgd.ri_riset, mgd.ri_summary, mgd.ri_summary_expt_ref, mgd.seq_allele_assoc, mgd.seq_coord_cache, mgd.seq_genemodel, mgd.seq_genetrap, mgd.seq_marker_cache, mgd.seq_probe_cache, mgd.seq_sequence, mgd.seq_sequence_assoc, mgd.seq_sequence_raw, mgd.seq_source_assoc, mgd.voc_allele_cache, mgd.voc_annot_count_cache, mgd.voc_evidence_property, mgd.voc_marker_cache, mgd.voc_term_emapa, mgd.voc_term_emaps, mgd.wks_rosetta" }, { @@ -36,7 +36,7 @@ { "ObjectType": "FUNCTION", "TotalCount": 139, - "InvalidCount": 0, + "InvalidCount": 17, "ObjectNames": "mgd.acc_accession_delete, mgd.acc_assignj, mgd.acc_assignmgi, mgd.acc_delete_byacckey, mgd.acc_insert, mgd.acc_setmax, mgd.acc_split, mgd.acc_update, mgd.accref_insert, mgd.accref_process, mgd.all_allele_delete, mgd.all_allele_insert, mgd.all_allele_update, mgd.all_cellline_delete, mgd.all_cellline_update1, mgd.all_cellline_update2, mgd.all_convertallele, mgd.all_createwildtype, mgd.all_insertallele, mgd.all_mergeallele, mgd.all_mergewildtypes, mgd.all_reloadlabel, mgd.all_variant_delete, mgd.bib_keepwfrelevance, mgd.bib_refs_delete, mgd.bib_refs_insert, mgd.bib_reloadcache, mgd.bib_updatewfstatusap, mgd.bib_updatewfstatusgo, mgd.bib_updatewfstatusgxd, mgd.bib_updatewfstatusqtl, mgd.gxd_addcelltypeset, mgd.gxd_addemapaset, mgd.gxd_addgenotypeset, mgd.gxd_allelepair_insert, mgd.gxd_antibody_delete, mgd.gxd_antibody_insert, mgd.gxd_antigen_delete, mgd.gxd_antigen_insert, mgd.gxd_assay_delete, mgd.gxd_assay_insert, mgd.gxd_checkduplicategenotype, mgd.gxd_gelrow_insert, mgd.gxd_genotype_delete, 
mgd.gxd_genotype_insert, mgd.gxd_getgenotypesdatasets, mgd.gxd_getgenotypesdatasetscount, mgd.gxd_htexperiment_delete, mgd.gxd_htrawsample_delete, mgd.gxd_htsample_ageminmax, mgd.gxd_index_insert, mgd.gxd_index_insert_before, mgd.gxd_index_update, mgd.gxd_orderallelepairs, mgd.gxd_ordergenotypes, mgd.gxd_ordergenotypesall, mgd.gxd_ordergenotypesmissing, mgd.gxd_removebadgelband, mgd.gxd_replacegenotype, mgd.img_image_delete, mgd.img_image_insert, mgd.img_setpdo, mgd.mgi_addsetmember, mgd.mgi_checkemapaclipboard, mgd.mgi_cleannote, mgd.mgi_deleteemapaclipboarddups, mgd.mgi_insertreferenceassoc, mgd.mgi_insertsynonym, mgd.mgi_mergerelationship, mgd.mgi_organism_delete, mgd.mgi_organism_insert, mgd.mgi_processnote, mgd.mgi_reference_assoc_delete, mgd.mgi_reference_assoc_insert, mgd.mgi_relationship_delete, mgd.mgi_resetageminmax, mgd.mgi_resetsequencenum, mgd.mgi_setmember_emapa_insert, mgd.mgi_setmember_emapa_update, mgd.mgi_statistic_delete, mgd.mgi_updatereferenceassoc, mgd.mgi_updatesetmember, mgd.mld_expt_marker_update, mgd.mld_expts_delete, mgd.mld_expts_insert, mgd.mrk_allelewithdrawal, mgd.mrk_cluster_delete, mgd.mrk_copyhistory, mgd.mrk_deletewithdrawal, mgd.mrk_inserthistory, mgd.mrk_marker_delete, mgd.mrk_marker_insert, mgd.mrk_marker_update, mgd.mrk_mergewithdrawal, mgd.mrk_reloadlocation, mgd.mrk_reloadreference, mgd.mrk_simplewithdrawal, mgd.mrk_strainmarker_delete, mgd.mrk_updatekeys, mgd.prb_ageminmax, mgd.prb_getstrainbyreference, mgd.prb_getstraindatasets, mgd.prb_getstrainreferences, mgd.prb_insertreference, mgd.prb_marker_insert, mgd.prb_marker_update, mgd.prb_mergestrain, mgd.prb_probe_delete, mgd.prb_probe_insert, mgd.prb_processanonymoussource, mgd.prb_processprobesource, mgd.prb_processseqloadersource, mgd.prb_processsequencesource, mgd.prb_reference_delete, mgd.prb_reference_update, mgd.prb_setstrainreview, mgd.prb_source_delete, mgd.prb_strain_delete, mgd.prb_strain_insert, mgd.seq_deletebycreatedby, mgd.seq_deletedummy, 
mgd.seq_deleteobsoletedummy, mgd.seq_merge, mgd.seq_sequence_delete, mgd.seq_split, mgd.voc_annot_insert, mgd.voc_copyannotevidencenotes, mgd.voc_evidence_delete, mgd.voc_evidence_insert, mgd.voc_evidence_property_delete, mgd.voc_evidence_update, mgd.voc_mergeannotations, mgd.voc_mergedupannotations, mgd.voc_mergeterms, mgd.voc_processannotheader, mgd.voc_processannotheaderall, mgd.voc_processannotheadermissing, mgd.voc_resetterms, mgd.voc_term_delete" }, { @@ -248,46 +248,6 @@ "UnsupportedDataTypes": null, "UnsupportedDataTypesDesc": "Data types of the source database that are not supported on the target YugabyteDB.", "UnsupportedFeatures": [ - { - "FeatureName": "GIST indexes", - "Objects": [] - }, - { - "FeatureName": "BRIN indexes", - "Objects": [] - }, - { - "FeatureName": "SPGIST indexes", - "Objects": [] - }, - { - "FeatureName": "Constraint triggers", - "Objects": [] - }, - { - "FeatureName": "Inherited tables", - "Objects": [] - }, - { - "FeatureName": "Tables with stored generated columns", - "Objects": [] - }, - { - "FeatureName": "Conversion objects", - "Objects": [] - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - "FeatureName": "Disabling rule on table", - "Objects": [] - }, { "FeatureName": "Clustering table on index", "Objects": [ @@ -540,44 +500,9 @@ "SqlStatement": "ALTER TABLE mgd.all_knockout_cache CLUSTER ON all_knockout_cache_idx_clustered;" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" - }, - { - "FeatureName": "Storage parameters in DDLs", - "Objects": [] - }, - { - "FeatureName": "Extensions", - "Objects": [] - }, - { - "FeatureName": "Exclusion constraints", - "Objects": [] - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, - { - "FeatureName": "BEFORE ROW triggers on Partitioned tables", - 
"Objects": [] - }, - { - "FeatureName": "Deferrable constraints", - "Objects": [] - }, - { - "FeatureName": "View with check option", - "Objects": [] - }, - { - "FeatureName": "Index on complex datatypes", - "Objects": [] - }, - { - "FeatureName": "Unlogged tables", - "Objects": [] - } + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null + } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ @@ -13491,22 +13416,240 @@ } ], "Notes": null, - "MigrationCaveats": [ - { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [], - "FeatureDescription": "After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." - }, + "MigrationCaveats": null, + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": [ { - "FeatureName": "Foreign tables", - "Objects": [], - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." - }, - { - "FeatureName": "Policies", - "Objects": [], - "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema." 
+ "FeatureName": "Referenced type declaration of variables", + "Objects": [ + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_assignmgi", + "SqlStatement": "acc_accession.accid%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_assignmgi", + "SqlStatement": "acc_accession.accid%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_insert", + "SqlStatement": "acc_accession.prefixPart%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_update", + "SqlStatement": "acc_accession.prefixPart%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_update", + "SqlStatement": "acc_accession.prefixPart%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_createwildtype", + "SqlStatement": "all_allele.symbol%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_insertallele", + "SqlStatement": "all_allele.isextinct%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_insertallele", + "SqlStatement": "all_allele.ismixed%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_reloadlabel", + "SqlStatement": "all_label.label%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_reloadlabel", + "SqlStatement": "all_label.labelType%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_reloadlabel", + "SqlStatement": "all_label.labelTypeName%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.img_setpdo", + "SqlStatement": "acc_accession.accID%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_allelewithdrawal", + "SqlStatement": "mrk_marker.symbol%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_allelewithdrawal", + "SqlStatement": "mrk_marker.name%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_allelewithdrawal", + "SqlStatement": "mrk_marker.symbol%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_copyhistory", + "SqlStatement": 
"mrk_history.name%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_copyhistory", + "SqlStatement": "mrk_history.event_date%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_deletewithdrawal", + "SqlStatement": "mrk_marker.name%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "SqlStatement": "mrk_marker.symbol%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "SqlStatement": "mrk_marker.name%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "SqlStatement": "mrk_marker.symbol%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "SqlStatement": "mrk_marker.chromosome%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "SqlStatement": "mrk_marker.name%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "SqlStatement": "mrk_marker.cytogeneticOffset%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "SqlStatement": "mrk_marker.cmOffset%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "SqlStatement": "all_allele.symbol%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "SqlStatement": "mrk_marker.chromosome%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "SqlStatement": "mrk_marker.cytogeneticOffset%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "SqlStatement": "mrk_marker.cmoffset%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "SqlStatement": "seq_coord_cache.startCoordinate%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "SqlStatement": "seq_coord_cache.endCoordinate%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", 
+ "SqlStatement": "seq_coord_cache.strand%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "SqlStatement": "voc_term.term%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "SqlStatement": "map_coord_collection.abbreviation%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "SqlStatement": "seq_coord_cache.version%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "SqlStatement": "seq_coord_cache.chromosome%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_simplewithdrawal", + "SqlStatement": "mrk_marker.name%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_simplewithdrawal", + "SqlStatement": "mrk_marker.symbol%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.prb_ageminmax", + "SqlStatement": "prb_source.age%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.prb_ageminmax", + "SqlStatement": "prb_source.age%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.prb_mergestrain", + "SqlStatement": "acc_accession.accID%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.prb_mergestrain", + "SqlStatement": "acc_accession.accID%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.seq_split", + "SqlStatement": "acc_accession.accID%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.seq_split", + "SqlStatement": "acc_accession.accID%TYPE" + }, + { + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mgi_resetageminmax", + "SqlStatement": "prb_source.age%TYPE" + } + ], + "MinimumVersionsFixedIn": null, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported" } - ], - "UnsupportedQueryConstructs": null + ] } diff --git a/migtests/tests/pg/mgi/expected_files/expected_schema_analysis_report.json 
b/migtests/tests/pg/mgi/expected_files/expected_schema_analysis_report.json index ccadb13896..c5b66ad796 100644 --- a/migtests/tests/pg/mgi/expected_files/expected_schema_analysis_report.json +++ b/migtests/tests/pg/mgi/expected_files/expected_schema_analysis_report.json @@ -1,6 +1,5 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "MEDIUM", "Summary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "test_mgd", @@ -24,7 +23,7 @@ { "ObjectType": "TABLE", "TotalCount": 175, - "InvalidCount": 0, + "InvalidCount": 62, "ObjectNames": "mgd.acc_accession, mgd.acc_accessionmax, mgd.acc_accessionreference, mgd.acc_actualdb, mgd.acc_logicaldb, mgd.acc_mgitype, mgd.all_allele, mgd.all_allele_cellline, mgd.all_cellline, mgd.all_cellline_derivation, mgd.mgi_user, mgd.prb_strain, mgd.voc_term, mgd.mgi_organism, mgd.mgi_relationship, mgd.mrk_marker, mgd.all_allele_mutation, mgd.voc_annot, mgd.bib_citation_cache, mgd.gxd_allelepair, mgd.voc_annottype, mgd.all_cre_cache, mgd.all_knockout_cache, mgd.all_label, mgd.gxd_allelegenotype, mgd.mgi_reference_assoc, mgd.all_variant, mgd.all_variant_sequence, mgd.bib_refs, mgd.gxd_expression, mgd.gxd_index, mgd.img_image, mgd.mgi_refassoctype, mgd.mgi_synonym, mgd.mgi_synonymtype, mgd.mld_expts, mgd.mld_notes, mgd.mrk_do_cache, mgd.mrk_reference, mgd.mrk_strainmarker, mgd.prb_reference, mgd.prb_source, mgd.voc_evidence, mgd.bib_books, mgd.bib_workflow_status, mgd.bib_notes, mgd.gxd_assay, mgd.gxd_specimen, mgd.bib_workflow_data, mgd.bib_workflow_relevance, mgd.bib_workflow_tag, mgd.crs_cross, mgd.crs_matrix, mgd.crs_progeny, mgd.crs_references, mgd.crs_typings, mgd.dag_closure, mgd.dag_dag, mgd.dag_edge, mgd.dag_label, mgd.dag_node, mgd.voc_vocabdag, mgd.go_tracking, mgd.gxd_antibody, mgd.gxd_antigen, mgd.gxd_antibodyalias, mgd.gxd_antibodymarker, mgd.gxd_antibodyprep, mgd.gxd_gellane, mgd.gxd_insituresult, mgd.gxd_insituresultimage, mgd.gxd_assaytype, mgd.gxd_assaynote, mgd.gxd_gelband, 
mgd.gxd_gelrow, mgd.gxd_genotype, mgd.gxd_gellanestructure, mgd.gxd_theilerstage, mgd.voc_annotheader, mgd.gxd_htexperiment, mgd.gxd_htexperimentvariable, mgd.gxd_htrawsample, mgd.gxd_htsample, mgd.gxd_htsample_rnaseq, mgd.gxd_htsample_rnaseqcombined, mgd.gxd_htsample_rnaseqset, mgd.gxd_htsample_rnaseqset_cache, mgd.gxd_htsample_rnaseqsetmember, mgd.gxd_index_stages, mgd.gxd_isresultcelltype, mgd.img_imagepane, mgd.gxd_isresultstructure, mgd.gxd_probeprep, mgd.prb_probe, mgd.img_imagepane_assoc, mgd.mgi_note, mgd.map_coord_collection, mgd.map_coord_feature, mgd.map_coordinate, mgd.mrk_chromosome, mgd.mgi_dbinfo, mgd.mgi_keyvalue, mgd.mgi_notetype, mgd.mgi_organism_mgitype, mgd.mgi_property, mgd.mgi_propertytype, mgd.mgi_relationship_category, mgd.mgi_relationship_property, mgd.mgi_set, mgd.mgi_setmember, mgd.mgi_setmember_emapa, mgd.mgi_translation, mgd.mgi_translationtype, mgd.voc_vocab, mgd.mld_assay_types, mgd.mld_concordance, mgd.mld_contig, mgd.mld_contigprobe, mgd.mld_expt_marker, mgd.mld_expt_notes, mgd.mld_fish, mgd.mld_fish_region, mgd.mld_hit, mgd.mld_hybrid, mgd.mld_insitu, mgd.mld_isregion, mgd.mld_matrix, mgd.mld_mc2point, mgd.mld_mcdatalist, mgd.mld_ri, mgd.mld_ri2point, mgd.mld_ridata, mgd.mld_statistics, mgd.mrk_types, mgd.mrk_biotypemapping, mgd.mrk_cluster, mgd.mrk_clustermember, mgd.mrk_current, mgd.mrk_history, mgd.mrk_label, mgd.mrk_location_cache, mgd.mrk_status, mgd.mrk_mcv_cache, mgd.mrk_mcv_count_cache, mgd.mrk_notes, mgd.prb_alias, mgd.prb_allele, mgd.prb_allele_strain, mgd.prb_marker, mgd.prb_notes, mgd.prb_ref_notes, mgd.prb_rflv, mgd.prb_tissue, mgd.prb_strain_genotype, mgd.prb_strain_marker, mgd.ri_riset, mgd.ri_summary, mgd.ri_summary_expt_ref, mgd.seq_allele_assoc, mgd.seq_coord_cache, mgd.seq_genemodel, mgd.seq_genetrap, mgd.seq_marker_cache, mgd.seq_probe_cache, mgd.seq_sequence, mgd.seq_sequence_assoc, mgd.seq_sequence_raw, mgd.seq_source_assoc, mgd.voc_allele_cache, mgd.voc_annot_count_cache, mgd.voc_evidence_property, 
mgd.voc_marker_cache, mgd.voc_term_emapa, mgd.voc_term_emaps, mgd.wks_rosetta" }, { @@ -36,7 +35,7 @@ { "ObjectType": "FUNCTION", "TotalCount": 139, - "InvalidCount": 0, + "InvalidCount": 17, "ObjectNames": "mgd.acc_accession_delete, mgd.acc_assignj, mgd.acc_assignmgi, mgd.acc_delete_byacckey, mgd.acc_insert, mgd.acc_setmax, mgd.acc_split, mgd.acc_update, mgd.accref_insert, mgd.accref_process, mgd.all_allele_delete, mgd.all_allele_insert, mgd.all_allele_update, mgd.all_cellline_delete, mgd.all_cellline_update1, mgd.all_cellline_update2, mgd.all_convertallele, mgd.all_createwildtype, mgd.all_insertallele, mgd.all_mergeallele, mgd.all_mergewildtypes, mgd.all_reloadlabel, mgd.all_variant_delete, mgd.bib_keepwfrelevance, mgd.bib_refs_delete, mgd.bib_refs_insert, mgd.bib_reloadcache, mgd.bib_updatewfstatusap, mgd.bib_updatewfstatusgo, mgd.bib_updatewfstatusgxd, mgd.bib_updatewfstatusqtl, mgd.gxd_addcelltypeset, mgd.gxd_addemapaset, mgd.gxd_addgenotypeset, mgd.gxd_allelepair_insert, mgd.gxd_antibody_delete, mgd.gxd_antibody_insert, mgd.gxd_antigen_delete, mgd.gxd_antigen_insert, mgd.gxd_assay_delete, mgd.gxd_assay_insert, mgd.gxd_checkduplicategenotype, mgd.gxd_gelrow_insert, mgd.gxd_genotype_delete, mgd.gxd_genotype_insert, mgd.gxd_getgenotypesdatasets, mgd.gxd_getgenotypesdatasetscount, mgd.gxd_htexperiment_delete, mgd.gxd_htrawsample_delete, mgd.gxd_htsample_ageminmax, mgd.gxd_index_insert, mgd.gxd_index_insert_before, mgd.gxd_index_update, mgd.gxd_orderallelepairs, mgd.gxd_ordergenotypes, mgd.gxd_ordergenotypesall, mgd.gxd_ordergenotypesmissing, mgd.gxd_removebadgelband, mgd.gxd_replacegenotype, mgd.img_image_delete, mgd.img_image_insert, mgd.img_setpdo, mgd.mgi_addsetmember, mgd.mgi_checkemapaclipboard, mgd.mgi_cleannote, mgd.mgi_deleteemapaclipboarddups, mgd.mgi_insertreferenceassoc, mgd.mgi_insertsynonym, mgd.mgi_mergerelationship, mgd.mgi_organism_delete, mgd.mgi_organism_insert, mgd.mgi_processnote, mgd.mgi_reference_assoc_delete, mgd.mgi_reference_assoc_insert, 
mgd.mgi_relationship_delete, mgd.mgi_resetageminmax, mgd.mgi_resetsequencenum, mgd.mgi_setmember_emapa_insert, mgd.mgi_setmember_emapa_update, mgd.mgi_statistic_delete, mgd.mgi_updatereferenceassoc, mgd.mgi_updatesetmember, mgd.mld_expt_marker_update, mgd.mld_expts_delete, mgd.mld_expts_insert, mgd.mrk_allelewithdrawal, mgd.mrk_cluster_delete, mgd.mrk_copyhistory, mgd.mrk_deletewithdrawal, mgd.mrk_inserthistory, mgd.mrk_marker_delete, mgd.mrk_marker_insert, mgd.mrk_marker_update, mgd.mrk_mergewithdrawal, mgd.mrk_reloadlocation, mgd.mrk_reloadreference, mgd.mrk_simplewithdrawal, mgd.mrk_strainmarker_delete, mgd.mrk_updatekeys, mgd.prb_ageminmax, mgd.prb_getstrainbyreference, mgd.prb_getstraindatasets, mgd.prb_getstrainreferences, mgd.prb_insertreference, mgd.prb_marker_insert, mgd.prb_marker_update, mgd.prb_mergestrain, mgd.prb_probe_delete, mgd.prb_probe_insert, mgd.prb_processanonymoussource, mgd.prb_processprobesource, mgd.prb_processseqloadersource, mgd.prb_processsequencesource, mgd.prb_reference_delete, mgd.prb_reference_update, mgd.prb_setstrainreview, mgd.prb_source_delete, mgd.prb_strain_delete, mgd.prb_strain_insert, mgd.seq_deletebycreatedby, mgd.seq_deletedummy, mgd.seq_deleteobsoletedummy, mgd.seq_merge, mgd.seq_sequence_delete, mgd.seq_split, mgd.voc_annot_insert, mgd.voc_copyannotevidencenotes, mgd.voc_evidence_delete, mgd.voc_evidence_insert, mgd.voc_evidence_property_delete, mgd.voc_evidence_update, mgd.voc_mergeannotations, mgd.voc_mergedupannotations, mgd.voc_mergeterms, mgd.voc_processannotheader, mgd.voc_processannotheaderall, mgd.voc_processannotheadermissing, mgd.voc_resetterms, mgd.voc_term_delete" }, { @@ -63,7 +62,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -74,7 +74,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -85,7 +86,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -96,7 +98,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -107,7 +110,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -118,7 +122,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -129,7 +134,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", @@ -140,7 +146,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -151,7 +158,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -162,7 +170,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -173,7 +182,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": 
"https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -184,7 +194,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -195,7 +206,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -206,7 +218,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + 
"DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -217,7 +230,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -228,7 +242,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -239,7 +254,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", @@ -250,7 +266,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -261,7 +278,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -272,7 +290,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -283,7 +302,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": 
"https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -294,7 +314,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -305,7 +326,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -316,7 +338,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + 
"DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -327,7 +350,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -338,7 +362,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -349,7 +374,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", @@ -360,7 +386,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -371,7 +398,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -382,7 +410,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -393,7 +422,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": 
"https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -404,7 +434,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -415,7 +446,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -426,7 +458,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + 
"DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -437,7 +470,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -448,7 +482,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -459,7 +494,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", @@ -470,7 +506,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -481,7 +518,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -492,7 +530,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -503,7 +542,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": 
"https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -514,7 +554,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -525,7 +566,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -536,7 +578,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + 
"DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -547,7 +590,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -558,7 +602,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -569,7 +614,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", @@ -580,7 +626,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -591,7 +638,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -602,7 +650,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -613,7 +662,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": 
"https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -624,7 +674,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -635,7 +686,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -646,7 +698,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + 
"DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -657,7 +710,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -668,7 +722,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -679,7 +734,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", @@ -690,7 +746,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -701,7 +758,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -712,7 +770,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -723,7 +782,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove it from the exported schema.", 
"GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -734,7 +794,548 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove it from the exported schema.", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1124", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_assignmgi", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "acc_accession.accid%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_assignmgi", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "acc_accession.accid%TYPE", + "FilePath": 
"/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_insert", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "acc_accession.prefixPart%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_update", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "acc_accession.prefixPart%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.acc_update", + "Reason": "Referenced type declaration of variables", + 
"SqlStatement": "acc_accession.prefixPart%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_createwildtype", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "all_allele.symbol%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_insertallele", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "all_allele.isextinct%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": 
"mgd.all_insertallele", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "all_allele.ismixed%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_reloadlabel", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "all_label.label%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_reloadlabel", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "all_label.labelType%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": 
"unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.all_reloadlabel", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "all_label.labelTypeName%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.img_setpdo", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "acc_accession.accID%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_allelewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.symbol%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + 
"MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_allelewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.name%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_allelewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.symbol%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_copyhistory", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_history.name%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_copyhistory", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_history.event_date%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_deletewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.name%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.symbol%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": 
"https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.name%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.symbol%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.chromosome%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual 
type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.name%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.cytogeneticOffset%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.cmOffset%TYPE", + "FilePath": 
"/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_mergewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "all_allele.symbol%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.chromosome%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of 
variables", + "SqlStatement": "mrk_marker.cytogeneticOffset%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.cmoffset%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "seq_coord_cache.startCoordinate%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + 
"ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "seq_coord_cache.endCoordinate%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "seq_coord_cache.strand%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "voc_term.term%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + 
"IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "map_coord_collection.abbreviation%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "seq_coord_cache.version%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_reloadlocation", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "seq_coord_cache.chromosome%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_simplewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.name%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mrk_simplewithdrawal", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "mrk_marker.symbol%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.prb_ageminmax", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "prb_source.age%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": 
"https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.prb_ageminmax", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "prb_source.age%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.prb_mergestrain", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "acc_accession.accID%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.prb_mergestrain", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "acc_accession.accID%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead 
of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.seq_split", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "acc_accession.accID%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.seq_split", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "acc_accession.accID%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null + }, + { + "IssueType": "unsupported_plpgsql_objects", + "ObjectType": "FUNCTION", + "ObjectName": "mgd.mgi_resetageminmax", + "Reason": "Referenced type declaration of variables", + "SqlStatement": "prb_source.age%TYPE", + "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/mgi/export-dir/schema/functions/function.sql", + "Suggestion": "Fix the syntax 
to include the actual type name instead of referencing the type of a column", + "GH": "https://github.com/yugabyte/yugabyte-db/issues/23619", + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", + "MinimumVersionsFixedIn": null } ] } diff --git a/migtests/tests/pg/omnibus/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/omnibus/expected_files/expectedAssessmentReport.json index b82d09d963..6bc3e3653b 100755 --- a/migtests/tests/pg/omnibus/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/omnibus/expected_files/expectedAssessmentReport.json @@ -1,6 +1,6 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "MEDIUM", + "MigrationComplexity": "HIGH", "SchemaSummary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "test_omnibus", @@ -501,20 +501,9 @@ "SqlStatement": "CREATE INDEX hidx ON extension_example.testhstore USING gist (h extension_example.gist_hstore_ops (siglen='32'));" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" - }, - { - "FeatureName": "BRIN indexes", - "Objects": [] - }, - { - "FeatureName": "SPGIST indexes", - "Objects": [] - }, - { - "FeatureName": "Constraint triggers", - "Objects": [] - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Inherited tables", "Objects": [ @@ -535,16 +524,9 @@ "SqlStatement": "CREATE TABLE regress_rls_schema.t3_3 (\n id integer NOT NULL,\n c text,\n b text,\n a integer\n)\nINHERITS (regress_rls_schema.t1_3);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, - { - 
"FeatureName": "BEFORE ROW triggers on Partitioned tables", - "Objects": [] - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Tables with stored generated columns", "Objects": [ @@ -557,8 +539,9 @@ "SqlStatement": "CREATE TABLE enum_example.bugs (\n id integer NOT NULL,\n description text,\n status enum_example.bug_status,\n _status enum_example.bug_status GENERATED ALWAYS AS (status) STORED,\n severity enum_example.bug_severity,\n _severity enum_example.bug_severity GENERATED ALWAYS AS (severity) STORED,\n info enum_example.bug_info GENERATED ALWAYS AS (enum_example.make_bug_info(status, severity)) STORED\n);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Conversion objects", "Objects": [ @@ -567,64 +550,39 @@ "SqlStatement": "CREATE CONVERSION conversion_example.myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#create-or-alter-conversion-is-not-supported" - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - "FeatureName": "Disabling rule on table", - "Objects": [] - }, - { - "FeatureName": "Clustering table on index", - "Objects": [] - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#create-or-alter-conversion-is-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Storage parameters in DDLs", "Objects": [ { - "ObjectName": "gin_idx", - 
"SqlStatement": "CREATE INDEX gin_idx ON idx_ex.films USING gin (to_tsvector('english'::regconfig, title)) WITH (fastupdate=off); " + "ObjectName": "gin_idx ON idx_ex.films", + "SqlStatement": "CREATE INDEX gin_idx ON idx_ex.films USING gin (to_tsvector('english'::regconfig, title)) WITH (fastupdate=off);" }, { - "ObjectName": "title_idx", - "SqlStatement": "CREATE UNIQUE INDEX title_idx ON idx_ex.films USING btree (title) WITH (fillfactor='70'); " + "ObjectName": "title_idx ON idx_ex.films", + "SqlStatement": "CREATE UNIQUE INDEX title_idx ON idx_ex.films USING btree (title) WITH (fillfactor='70');" }, { - "ObjectName": "title_idx_with_duplicates", - "SqlStatement": "CREATE INDEX title_idx_with_duplicates ON idx_ex.films USING btree (title) WITH (deduplicate_items=off); " + "ObjectName": "title_idx_with_duplicates ON idx_ex.films", + "SqlStatement": "CREATE INDEX title_idx_with_duplicates ON idx_ex.films USING btree (title) WITH (deduplicate_items=off);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" - }, - { - "FeatureName": "Extensions", - "Objects": [] - }, - { - "FeatureName": "Exclusion constraints", - "Objects": [] - }, - { - "FeatureName": "Deferrable constraints", - "Objects": [] - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "View with check option", "Objects": [ { "ObjectName": "regress_rls_schema.bv1", - "SqlStatement": "CREATE VIEW regress_rls_schema.bv1 WITH (security_barrier='true') AS\n SELECT b1.a,\n b1.b\n FROM regress_rls_schema.b1\n WHERE (b1.a \u003e 0)\n WITH CASCADED CHECK OPTION;" + "SqlStatement": "CREATE VIEW regress_rls_schema.bv1 WITH (security_barrier='true') AS\n SELECT a,\n b\n FROM regress_rls_schema.b1\n WHERE (a \u003e 0)\n WITH CASCADED 
CHECK OPTION;" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#view-with-check-option-is-not-supported" - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#view-with-check-option-is-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Index on complex datatypes", "Objects": [ @@ -633,12 +591,9 @@ "SqlStatement": "CREATE INDEX idx_1 ON composite_type_examples.ordinary_table USING btree (basic_);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" - }, - { - "FeatureName": "Unlogged tables", - "Objects": [] - } + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null + } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ @@ -5503,22 +5458,18 @@ ], "Notes": null, "MigrationCaveats": [ - { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [], - "FeatureDescription": "After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." 
- }, { "FeatureName": "Foreign tables", "Objects": [ { "ObjectName": "foreign_db_example.technically_doesnt_exist", - "SqlStatement": "CREATE FOREIGN TABLE foreign_db_example.technically_doesnt_exist ( id integer, uses_type foreign_db_example.example_type, _uses_type foreign_db_example.example_type GENERATED ALWAYS AS (uses_type) STORED, positive_number foreign_db_example.positive_number, _positive_number foreign_db_example.positive_number GENERATED ALWAYS AS (positive_number) STORED, CONSTRAINT imaginary_table_id_gt_1 CHECK ((id \u003e 1)) ) SERVER technically_this_server; " + "SqlStatement": "CREATE FOREIGN TABLE foreign_db_example.technically_doesnt_exist ( id integer, uses_type foreign_db_example.example_type, _uses_type foreign_db_example.example_type GENERATED ALWAYS AS (uses_type) STORED, positive_number foreign_db_example.positive_number, _positive_number foreign_db_example.positive_number GENERATED ALWAYS AS (positive_number) STORED, CONSTRAINT imaginary_table_id_gt_1 CHECK ((id \u003e 1)) ) SERVER technically_this_server;" } ], "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping", - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." - }, + "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work.", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Policies", "Objects": [ @@ -5576,8 +5527,9 @@ } ], "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", - "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. 
Therefore, they will have to be manually created before running import schema." - }, + "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema.", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Unsupported Data Types for Live Migration", "Objects": [ @@ -5615,8 +5567,9 @@ } ], "DocsLink":"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", - "FeatureDescription": "There are some data types in the schema that are not supported by live migration of data. These columns will be excluded when exporting and importing data in live migration workflows." - }, + "FeatureDescription": "There are some data types in the schema that are not supported by live migration of data. These columns will be excluded when exporting and importing data in live migration workflows.", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Unsupported Data Types for Live Migration with Fall-forward/Fallback", "Objects": [ @@ -5694,8 +5647,10 @@ } ], "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", - "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. These columns will be excluded when exporting and importing data in live migration workflows." + "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. 
These columns will be excluded when exporting and importing data in live migration workflows.", + "MinimumVersionsFixedIn": null } ], - "UnsupportedQueryConstructs": null + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/pg/omnibus/expected_files/expected_failed.sql b/migtests/tests/pg/omnibus/expected_files/expected_failed.sql index cc663a37c2..ad72d68083 100755 --- a/migtests/tests/pg/omnibus/expected_files/expected_failed.sql +++ b/migtests/tests/pg/omnibus/expected_files/expected_failed.sql @@ -105,10 +105,10 @@ ERROR: VIEW WITH CASCADED CHECK OPTION not supported yet (SQLSTATE 0A000) File :/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/views/view.sql */ CREATE VIEW regress_rls_schema.bv1 WITH (security_barrier='true') AS - SELECT b1.a, - b1.b + SELECT a, + b FROM regress_rls_schema.b1 - WHERE (b1.a > 0) + WHERE (a > 0) WITH CASCADED CHECK OPTION; /* @@ -223,10 +223,10 @@ ERROR: relation "composite_type_examples.ordinary_table" does not exist (SQLSTAT File :/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/views/view.sql */ CREATE VIEW composite_type_examples.basic_view AS - SELECT ordinary_table.basic_, - ordinary_table._basic, - ordinary_table.nested, - ordinary_table._nested + SELECT basic_, + _basic, + nested, + _nested FROM composite_type_examples.ordinary_table; /* @@ -234,8 +234,8 @@ ERROR: relation "enum_example.bugs" does not exist (SQLSTATE 42P01) File :/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/views/view.sql */ CREATE VIEW enum_example._bugs AS - SELECT bugs.id, - bugs.status + SELECT id, + status FROM enum_example.bugs; /* @@ -243,11 +243,11 @@ ERROR: relation "foreign_db_example.technically_doesnt_exist" does not exist (SQ File :/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/views/view.sql */ CREATE VIEW public.foreign_db_example AS - SELECT technically_doesnt_exist.id, - technically_doesnt_exist.uses_type, - 
technically_doesnt_exist._uses_type, - technically_doesnt_exist.positive_number, - technically_doesnt_exist._positive_number + SELECT id, + uses_type, + _uses_type, + positive_number, + _positive_number FROM foreign_db_example.technically_doesnt_exist; /* @@ -255,7 +255,7 @@ ERROR: relation "range_type_example.example_tbl" does not exist (SQLSTATE 42P01) File :/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/views/view.sql */ CREATE VIEW range_type_example.depends_on_col_using_type AS - SELECT example_tbl.col + SELECT col FROM range_type_example.example_tbl; /* diff --git a/migtests/tests/pg/omnibus/expected_files/expected_schema_analysis_report.json b/migtests/tests/pg/omnibus/expected_files/expected_schema_analysis_report.json index 5e7ce60568..bde78722cb 100755 --- a/migtests/tests/pg/omnibus/expected_files/expected_schema_analysis_report.json +++ b/migtests/tests/pg/omnibus/expected_files/expected_schema_analysis_report.json @@ -1,6 +1,5 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "MEDIUM", "Summary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "test_omnibus", @@ -150,7 +149,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -161,7 +161,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -172,7 +173,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -183,7 +185,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -194,7 +197,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "Using Triggers to update the generated columns is one way to work around this issue, refer docs link for more details.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/10695", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported", + 
"MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -205,7 +209,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "Using Triggers to update the generated columns is one way to work around this issue, refer docs link for more details.", "GH": "https://github.com/yugabyte/yugabyte-db/issues/10695", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -216,7 +221,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1337", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -226,30 +232,33 @@ "SqlStatement": "CREATE INDEX idx_1 ON composite_type_examples.ordinary_table USING btree (basic_);", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Refer to the docs link for the workaround", - "GH": "https://github.com/yugabyte/yugabyte-db/issues/9698", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported" + "GH": "https://github.com/yugabyte/yugabyte-db/issues/25003", + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "gin_idx", + "ObjectName": "gin_idx ON idx_ex.films", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX gin_idx ON idx_ex.films USING gin (to_tsvector('english'::regconfig, title)) WITH (fastupdate=off); ", + "SqlStatement": "CREATE INDEX gin_idx ON idx_ex.films USING gin (to_tsvector('english'::regconfig, title)) WITH (fastupdate=off);", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "title_idx", + "ObjectName": "title_idx ON idx_ex.films", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE UNIQUE INDEX title_idx ON idx_ex.films USING btree (title) WITH (fillfactor='70'); ", + "SqlStatement": "CREATE UNIQUE INDEX title_idx ON idx_ex.films USING btree (title) WITH (fillfactor='70');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -260,7 +269,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -271,7 +281,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -282,7 +293,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -293,7 +305,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -304,29 +317,32 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "title_idx_with_duplicates", + "ObjectName": "title_idx_with_duplicates ON idx_ex.films", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX title_idx_with_duplicates ON idx_ex.films USING btree (title) WITH (deduplicate_items=off); ", + "SqlStatement": "CREATE INDEX title_idx_with_duplicates ON idx_ex.films USING btree (title) WITH (deduplicate_items=off);", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", 
"GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "VIEW", "ObjectName": "regress_rls_schema.bv1", "Reason": "Schema containing VIEW WITH CHECK OPTION is not supported yet.", - "SqlStatement": "CREATE VIEW regress_rls_schema.bv1 WITH (security_barrier='true') AS\n SELECT b1.a,\n b1.b\n FROM regress_rls_schema.b1\n WHERE (b1.a \u003e 0)\n WITH CASCADED CHECK OPTION;", + "SqlStatement": "CREATE VIEW regress_rls_schema.bv1 WITH (security_barrier='true') AS\n SELECT a,\n b\n FROM regress_rls_schema.b1\n WHERE (a \u003e 0)\n WITH CASCADED CHECK OPTION;", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/views/view.sql", "Suggestion": "Use Trigger with INSTEAD OF clause on INSERT/UPDATE on view to get this functionality", "GH": "https://github.com/yugabyte/yugabyte-db/issues/22716", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#view-with-check-option-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#view-with-check-option-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -337,18 +353,20 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/conversions/conversion.sql", "Suggestion": "Remove it from the exported schema", "GH": "https://github.com/yugabyte/yugabyte-db/issues/10866", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#create-or-alter-conversion-is-not-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#create-or-alter-conversion-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "FOREIGN TABLE", "ObjectName": "foreign_db_example.technically_doesnt_exist", "Reason": "Foreign tables require manual intervention.", - "SqlStatement": "CREATE FOREIGN TABLE foreign_db_example.technically_doesnt_exist ( id integer, uses_type foreign_db_example.example_type, _uses_type foreign_db_example.example_type GENERATED ALWAYS AS (uses_type) STORED, positive_number foreign_db_example.positive_number, _positive_number foreign_db_example.positive_number GENERATED ALWAYS AS (positive_number) STORED, CONSTRAINT imaginary_table_id_gt_1 CHECK ((id \u003e 1)) ) SERVER technically_this_server; ", + "SqlStatement": "CREATE FOREIGN TABLE foreign_db_example.technically_doesnt_exist ( id integer, uses_type foreign_db_example.example_type, _uses_type foreign_db_example.example_type GENERATED ALWAYS AS (uses_type) STORED, positive_number foreign_db_example.positive_number, _positive_number foreign_db_example.positive_number GENERATED ALWAYS AS (positive_number) STORED, CONSTRAINT imaginary_table_id_gt_1 CHECK ((id \u003e 1)) ) SERVER technically_this_server;", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/foreign_table.sql", "Suggestion": "SERVER 'technically_this_server', and USER MAPPING should be created manually on the target to create and use the foreign table", "GH": "https://github.com/yugabyte/yb-voyager/issues/1627", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -359,7 
+377,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -370,7 +389,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -381,7 +401,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. 
Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -392,7 +413,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -403,7 +425,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -414,7 +437,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -425,7 +449,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -436,7 +461,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -447,7 +473,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -458,7 +485,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -469,7 +497,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -480,7 +509,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -491,7 +521,8 
@@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -502,7 +533,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -513,7 +545,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. 
Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -524,7 +557,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -535,7 +569,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. 
Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -546,7 +581,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -557,7 +593,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. 
Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -568,7 +605,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -579,7 +617,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/omnibus/export-dir/schema/policies/policy.sql", "Suggestion": "Users/Grants are not migrated during the schema migration. 
Create the Users manually to make the policies work", "GH": "https://github.com/yugabyte/yb-voyager/issues/1655", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", + "MinimumVersionsFixedIn": null } ] } diff --git a/migtests/tests/pg/osm/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/osm/expected_files/expectedAssessmentReport.json index 8ee648b9c1..45fb806f22 100755 --- a/migtests/tests/pg/osm/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/osm/expected_files/expectedAssessmentReport.json @@ -1,6 +1,6 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "LOW", + "MigrationComplexity": "MEDIUM", "SchemaSummary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "test_osm", @@ -81,60 +81,9 @@ "SqlStatement": "CREATE INDEX changeset_geom_gist ON public.osm_changeset USING gist (geom);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" - }, - { - "FeatureName": "BRIN indexes", - "Objects": [] - }, - { - "FeatureName": "SPGIST indexes", - "Objects": [] - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, - { - "FeatureName": "BEFORE ROW triggers on Partitioned tables", - "Objects": [] - }, - { - "FeatureName": "Constraint triggers", - "Objects": [] - }, - { - "FeatureName": "Inherited tables", - "Objects": [] - }, - { - "FeatureName": "Tables with stored generated columns", - "Objects": [] - }, - { - "FeatureName": "Conversion objects", - "Objects": [] - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - 
"FeatureName": "Disabling rule on table", - "Objects": [] - }, - { - "FeatureName": "Clustering table on index", - "Objects": [] - }, - { - "FeatureName": "Storage parameters in DDLs", - "Objects": [] - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Extensions", "Objects": [ @@ -143,28 +92,9 @@ "SqlStatement": "CREATE EXTENSION IF NOT EXISTS postgis WITH SCHEMA public;" } ], - "DocsLink": "https://docs.yugabyte.com/preview/explore/ysql-language-features/pg-extensions/" - }, - { - "FeatureName": "Exclusion constraints", - "Objects": [] - }, - { - "FeatureName": "Deferrable constraints", - "Objects": [] - }, - { - "FeatureName": "View with check option", - "Objects": [] - }, - { - "FeatureName": "Index on complex datatypes", - "Objects": [] - }, - { - "FeatureName": "Unlogged tables", - "Objects": [] - } + "DocsLink": "https://docs.yugabyte.com/preview/explore/ysql-language-features/pg-extensions/", + "MinimumVersionsFixedIn": null + } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ @@ -296,22 +226,7 @@ } ], "Notes": null, - "MigrationCaveats": [ - { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [], - "FeatureDescription": "After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." - }, - { - "FeatureName": "Foreign tables", - "Objects": [], - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." - }, - { - "FeatureName": "Policies", - "Objects": [], - "FeatureDescription": "There are some policies that are created for certain users/roles. 
During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema." - } - ], - "UnsupportedQueryConstructs": null + "MigrationCaveats": null, + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/pg/osm/expected_files/expected_schema_analysis_report.json b/migtests/tests/pg/osm/expected_files/expected_schema_analysis_report.json index b353e463aa..8e78f4cbb8 100755 --- a/migtests/tests/pg/osm/expected_files/expected_schema_analysis_report.json +++ b/migtests/tests/pg/osm/expected_files/expected_schema_analysis_report.json @@ -1,6 +1,5 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "LOW", "Summary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "test_osm", @@ -52,7 +51,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/osm/export-dir/schema/extensions/extension.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1538", - "DocsLink": "https://docs.yugabyte.com/preview/explore/ysql-language-features/pg-extensions/" + "DocsLink": "https://docs.yugabyte.com/preview/explore/ysql-language-features/pg-extensions/", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_datatypes", @@ -63,7 +63,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/osm/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yugabyte-db/issues/11323", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -74,7 +75,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/osm/export-dir/schema/tables/INDEXES_table.sql", 
"Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1337", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null } ] } diff --git a/migtests/tests/pg/partitions-with-indexes/env.sh b/migtests/tests/pg/partitions-with-indexes/env.sh index 6b22305323..7f216dc8da 100644 --- a/migtests/tests/pg/partitions-with-indexes/env.sh +++ b/migtests/tests/pg/partitions-with-indexes/env.sh @@ -1,3 +1,3 @@ export SOURCE_DB_TYPE="postgresql" -export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"partitions"} +export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"partitions_with_index"} export SOURCE_DB_SCHEMA="public,p1,p2" diff --git a/migtests/tests/pg/partitions/env.sh b/migtests/tests/pg/partitions/env.sh index 9ba7757697..bbca0e5017 100644 --- a/migtests/tests/pg/partitions/env.sh +++ b/migtests/tests/pg/partitions/env.sh @@ -1,4 +1,3 @@ export SOURCE_DB_TYPE="postgresql" -export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"partitions"} export SOURCE_DB_SCHEMA="public,p1,p2" export MOVE_PK_FROM_ALTER_TO_CREATE="true" \ No newline at end of file diff --git a/migtests/tests/pg/partitions/export-data-status-report.json b/migtests/tests/pg/partitions/export-data-status-report.json new file mode 100644 index 0000000000..e9e23ee1ad --- /dev/null +++ b/migtests/tests/pg/partitions/export-data-status-report.json @@ -0,0 +1,37 @@ +[ + { + "table_name": "customers", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "sales", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "emp", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "p1.sales_region", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "range_columns_partition_test", + "status": "DONE", + 
"exported_count": 6 + }, + { + "table_name": "sales_region", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "test_partitions_sequences", + "status": "DONE", + "exported_count": 1000 + } +] \ No newline at end of file diff --git a/migtests/tests/pg/partitions/export-data-status-with-table-list-report.json b/migtests/tests/pg/partitions/export-data-status-with-table-list-report.json new file mode 100644 index 0000000000..e76c1483aa --- /dev/null +++ b/migtests/tests/pg/partitions/export-data-status-with-table-list-report.json @@ -0,0 +1,37 @@ +[ + { + "table_name": "customers (cust_other, cust_part11, cust_part12, cust_part21, cust_part22)", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "sales (sales_2019_q4, sales_2020_q1, sales_2020_q2)", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "emp (emp_0, emp_1, emp_2)", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "p1.sales_region (p2.boston, p2.london, p2.sydney)", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "range_columns_partition_test (range_columns_partition_test_p0, range_columns_partition_test_p1)", + "status": "DONE", + "exported_count": 6 + }, + { + "table_name": "sales_region (boston, london, sydney)", + "status": "DONE", + "exported_count": 1000 + }, + { + "table_name": "test_partitions_sequences (test_partitions_sequences_b, test_partitions_sequences_l, test_partitions_sequences_s)", + "status": "DONE", + "exported_count": 1000 + } +] \ No newline at end of file diff --git a/migtests/tests/pg/partitions/fix-schema b/migtests/tests/pg/partitions/fix-schema index 57007db3f5..8a341cc602 100755 --- a/migtests/tests/pg/partitions/fix-schema +++ b/migtests/tests/pg/partitions/fix-schema @@ -3,15 +3,15 @@ set -e set -x -sed -i 's/p2\.boston/p2.boston_region/g' $TEST_DIR/export-dir/schema/tables/table.sql -sed -i 's/p2\.london/p2.london_region/g' $TEST_DIR/export-dir/schema/tables/table.sql -sed -i 
's/p2\.sydney/p2.sydney_region/g' $TEST_DIR/export-dir/schema/tables/table.sql +sed -i 's/p2\.boston/p2.boston_region/g' ${EXPORT_DIR}/schema/tables/table.sql +sed -i 's/p2\.london/p2.london_region/g' ${EXPORT_DIR}/schema/tables/table.sql +sed -i 's/p2\.sydney/p2.sydney_region/g' ${EXPORT_DIR}/schema/tables/table.sql -sed -i 's/p2\.boston/p2.boston_region/g' $TEST_DIR/export-dir/schema/tables/INDEXES_table.sql -sed -i 's/p2\.london/p2.london_region/g' $TEST_DIR/export-dir/schema/tables/INDEXES_table.sql -sed -i 's/p2\.sydney/p2.sydney_region/g' $TEST_DIR/export-dir/schema/tables/INDEXES_table.sql +sed -i 's/p2\.boston/p2.boston_region/g' ${EXPORT_DIR}/schema/tables/INDEXES_table.sql +sed -i 's/p2\.london/p2.london_region/g' ${EXPORT_DIR}/schema/tables/INDEXES_table.sql +sed -i 's/p2\.sydney/p2.sydney_region/g' ${EXPORT_DIR}/schema/tables/INDEXES_table.sql # Added so that the validations work(to check the recommendations in target YB applied or not) -sed -i 's/p2\.boston/p2.boston_region/g' $TEST_DIR/export-dir/assessment/reports/migration_assessment_report.json -sed -i 's/p2\.london/p2.london_region/g' $TEST_DIR/export-dir/assessment/reports/migration_assessment_report.json -sed -i 's/p2\.sydney/p2.sydney_region/g' $TEST_DIR/export-dir/assessment/reports/migration_assessment_report.json \ No newline at end of file +sed -i 's/p2\.boston/p2.boston_region/g' ${EXPORT_DIR}/assessment/reports/migration_assessment_report.json +sed -i 's/p2\.london/p2.london_region/g' ${EXPORT_DIR}/assessment/reports/migration_assessment_report.json +sed -i 's/p2\.sydney/p2.sydney_region/g' ${EXPORT_DIR}/assessment/reports/migration_assessment_report.json \ No newline at end of file diff --git a/migtests/tests/pg/pgtbrus/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/pgtbrus/expected_files/expectedAssessmentReport.json index f14bd0b852..3ebfc9a508 100755 --- a/migtests/tests/pg/pgtbrus/expected_files/expectedAssessmentReport.json +++ 
b/migtests/tests/pg/pgtbrus/expected_files/expectedAssessmentReport.json @@ -79,88 +79,7 @@ }, "UnsupportedDataTypes": null, "UnsupportedDataTypesDesc": "Data types of the source database that are not supported on the target YugabyteDB.", - "UnsupportedFeatures": [ - { - "FeatureName": "GIST indexes", - "Objects": [] - }, - { - "FeatureName": "BRIN indexes", - "Objects": [] - }, - { - "FeatureName": "SPGIST indexes", - "Objects": [] - }, - { - "FeatureName": "Constraint triggers", - "Objects": [] - }, - { - "FeatureName": "Inherited tables", - "Objects": [] - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, - { - "FeatureName": "BEFORE ROW triggers on Partitioned tables", - "Objects": [] - }, - { - "FeatureName": "Tables with stored generated columns", - "Objects": [] - }, - { - "FeatureName": "Conversion objects", - "Objects": [] - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "Index on complex datatypes", - "Objects": [] - }, - { - "FeatureName": "Unlogged tables", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - "FeatureName": "Disabling rule on table", - "Objects": [] - }, - { - "FeatureName": "Clustering table on index", - "Objects": [] - }, - { - "FeatureName": "Storage parameters in DDLs", - "Objects": [] - }, - { - "FeatureName": "Extensions", - "Objects": [] - }, - { - "FeatureName": "Exclusion constraints", - "Objects": [] - }, - { - "FeatureName": "Deferrable constraints", - "Objects": [] - }, - { - "FeatureName": "View with check option", - "Objects": [] - } - ], + "UnsupportedFeatures": null, "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ { @@ -194,31 +113,22 @@ ], "Notes": null, "MigrationCaveats": [ - { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [], - "FeatureDescription": "After export schema, 
the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." - }, { "FeatureName": "Foreign tables", "Objects": [ { "ObjectName": "public.f_c", - "SqlStatement": "CREATE FOREIGN TABLE public.f_c ( i integer NOT NULL, t integer, x text ) SERVER p10 OPTIONS ( table_name 'c' ); " + "SqlStatement": "CREATE FOREIGN TABLE public.f_c ( i integer NOT NULL, t integer, x text ) SERVER p10 OPTIONS ( table_name 'c' );" }, { "ObjectName": "public.f_t", - "SqlStatement": "CREATE FOREIGN TABLE public.f_t ( i integer NOT NULL, ts timestamp(0) with time zone DEFAULT now(), j json, t text, e public.myenum, c public.mycomposit ) SERVER p10 OPTIONS ( table_name 't' ); " + "SqlStatement": "CREATE FOREIGN TABLE public.f_t ( i integer NOT NULL, ts timestamp(0) with time zone DEFAULT now(), j json, t text, e public.myenum, c public.mycomposit ) SERVER p10 OPTIONS ( table_name 't' );" } ], "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping", - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." - }, - { - "FeatureName": "Policies", - "Objects": [], - "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema." - }, + "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. 
These should be manually created to make the foreign tables work.", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Unsupported Data Types for Live Migration with Fall-forward/Fallback", "Objects": [ @@ -232,8 +142,10 @@ } ], "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", - "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. These columns will be excluded when exporting and importing data in live migration workflows." + "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. These columns will be excluded when exporting and importing data in live migration workflows.", + "MinimumVersionsFixedIn": null } ], - "UnsupportedQueryConstructs": null + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/pg/pgtbrus/expected_files/expected_schema_analysis_report.json b/migtests/tests/pg/pgtbrus/expected_files/expected_schema_analysis_report.json index 6d85facc5c..f886d71f44 100755 --- a/migtests/tests/pg/pgtbrus/expected_files/expected_schema_analysis_report.json +++ b/migtests/tests/pg/pgtbrus/expected_files/expected_schema_analysis_report.json @@ -1,6 +1,5 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "LOW", "Summary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "test_pgtbrus", @@ -65,22 +64,24 @@ "ObjectType": "FOREIGN TABLE", "ObjectName": "public.f_c", "Reason": "Foreign tables require manual intervention.", - "SqlStatement": "CREATE FOREIGN TABLE public.f_c ( i integer NOT NULL, t integer, x text ) SERVER p10 OPTIONS ( table_name 'c' ); ", + "SqlStatement": "CREATE FOREIGN TABLE public.f_c ( i integer NOT NULL, t integer, x text ) SERVER p10 OPTIONS ( table_name 'c' );", "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/pgtbrus/export-dir/schema/tables/foreign_table.sql", "Suggestion": "SERVER 'p10', and USER MAPPING should be created manually on the target to create and use the foreign table", "GH": "https://github.com/yugabyte/yb-voyager/issues/1627", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", "ObjectType": "FOREIGN TABLE", "ObjectName": "public.f_t", "Reason": "Foreign tables require manual intervention.", - "SqlStatement": "CREATE FOREIGN TABLE public.f_t ( i integer NOT NULL, ts timestamp(0) with time zone DEFAULT now(), j json, t text, e public.myenum, c public.mycomposit ) SERVER p10 OPTIONS ( table_name 't' ); ", + "SqlStatement": "CREATE FOREIGN TABLE public.f_t ( i integer NOT NULL, ts timestamp(0) with time zone DEFAULT now(), j json, t text, e public.myenum, c public.mycomposit ) SERVER p10 OPTIONS ( table_name 't' );", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/pgtbrus/export-dir/schema/tables/foreign_table.sql", "Suggestion": "SERVER 'p10', and USER MAPPING should be created manually on the target to create and use the foreign table", "GH": "https://github.com/yugabyte/yb-voyager/issues/1627", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -91,7 +92,8 @@ "FilePath": 
"/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/pgtbrus/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null } ] } diff --git a/migtests/tests/pg/rna/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/rna/expected_files/expectedAssessmentReport.json index 275de27adb..4294cd126b 100644 --- a/migtests/tests/pg/rna/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/rna/expected_files/expectedAssessmentReport.json @@ -377,22 +377,6 @@ "UnsupportedDataTypes": null, "UnsupportedDataTypesDesc": "Data types of the source database that are not supported on the target YugabyteDB.", "UnsupportedFeatures": [ - { - "FeatureName": "GIST indexes", - "Objects": [] - }, - { - "FeatureName": "BRIN indexes", - "Objects": [] - }, - { - "FeatureName": "SPGIST indexes", - "Objects": [] - }, - { - "FeatureName": "Constraint triggers", - "Objects": [] - }, { "FeatureName": "Inherited tables", "Objects": [ @@ -837,68 +821,9 @@ "SqlStatement": "CREATE TABLE rnacen.xref_p9_not_deleted (\n dbid smallint,\n created integer,\n last integer,\n upi character varying(26),\n version_i integer,\n deleted character(1),\n \"timestamp\" timestamp without time zone DEFAULT ('now'::text)::timestamp without time zone,\n userstamp character varying(20) DEFAULT 'USER'::character varying,\n ac character varying(300),\n version integer,\n taxid bigint,\n id bigint DEFAULT nextval('rnacen.xref_pk_seq'::regclass),\n CONSTRAINT \"ck_xref$deleted\" CHECK ((deleted = ANY (ARRAY['Y'::bpchar, 'N'::bpchar]))),\n CONSTRAINT xref_p9_not_deleted_check CHECK (((dbid = 9) AND (deleted = 
'N'::bpchar)))\n)\nINHERITS (rnacen.xref);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" - }, - { - "FeatureName": "Tables with stored generated columns", - "Objects": [] - }, - { - "FeatureName": "Conversion objects", - "Objects": [] - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, - { - "FeatureName": "BEFORE ROW triggers on Partitioned tables", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - "FeatureName": "Disabling rule on table", - "Objects": [] - }, - { - "FeatureName": "Clustering table on index", - "Objects": [] - }, - { - "FeatureName": "Storage parameters in DDLs", - "Objects": [] - }, - { - "FeatureName": "Extensions", - "Objects": [] - }, - { - "FeatureName": "Exclusion constraints", - "Objects": [] - }, - { - "FeatureName": "Deferrable constraints", - "Objects": [] - }, - { - "FeatureName": "View with check option", - "Objects": [] - }, - { - "FeatureName": "Index on complex datatypes", - "Objects": [] - }, - { - "FeatureName": "Unlogged tables", - "Objects": [] - } + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null + } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ @@ -26580,22 +26505,7 @@ } ], "Notes": null, - "MigrationCaveats": [ - { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [], - "FeatureDescription": "After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." 
- }, - { - "FeatureName": "Foreign tables", - "Objects": [], - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." - }, - { - "FeatureName": "Policies", - "Objects": [], - "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema." - } - ], - "UnsupportedQueryConstructs": null + "MigrationCaveats": null, + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/pg/rna/expected_files/expected_schema_analysis_report.json b/migtests/tests/pg/rna/expected_files/expected_schema_analysis_report.json index d9c1c349d1..33fbca2cce 100644 --- a/migtests/tests/pg/rna/expected_files/expected_schema_analysis_report.json +++ b/migtests/tests/pg/rna/expected_files/expected_schema_analysis_report.json @@ -1,6 +1,5 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "HIGH", "Summary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "test_rna", @@ -76,7 +75,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -87,7 +87,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -98,7 +99,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -109,7 +111,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -120,7 +123,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -131,7 +135,8 @@ "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -142,7 +147,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -153,7 +159,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -164,7 +171,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + 
"MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -175,7 +183,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -186,7 +195,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -197,7 +207,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -208,7 +219,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -219,7 +231,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -230,7 +243,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -241,7 +255,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -252,7 +267,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -263,7 +279,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -274,7 +291,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -285,7 +303,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -296,7 +315,8 @@ "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -307,7 +327,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -318,7 +339,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -329,7 +351,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + 
"MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -340,7 +363,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -351,7 +375,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -362,7 +387,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -373,7 +399,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -384,7 +411,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -395,7 +423,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -406,7 +435,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -417,7 +447,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -428,7 +459,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -439,7 +471,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -450,7 +483,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -461,7 +495,8 @@ "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -472,7 +507,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -483,7 +519,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -494,7 +531,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + 
"MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -505,7 +543,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -516,7 +555,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -527,7 +567,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -538,7 +579,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -549,7 +591,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -560,7 +603,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -571,7 +615,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -582,7 +627,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -593,7 +639,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -604,7 +651,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -615,7 +663,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -626,7 +675,8 @@ "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -637,7 +687,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -648,7 +699,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -659,7 +711,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + 
"MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -670,7 +723,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -681,7 +735,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -692,7 +747,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -703,7 +759,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -714,7 +771,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -725,7 +783,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -736,7 +795,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -747,7 +807,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -758,7 +819,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -769,7 +831,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -780,7 +843,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -791,7 +855,8 @@ "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -802,7 +867,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -813,7 +879,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -824,7 +891,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + 
"MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -835,7 +903,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -846,7 +915,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -857,7 +927,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -868,7 +939,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -879,7 +951,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -890,7 +963,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -901,7 +975,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -912,7 +987,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -923,7 +999,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -934,7 +1011,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -945,7 +1023,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -956,7 +1035,8 @@ "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -967,7 +1047,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -978,7 +1059,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -989,7 +1071,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + 
"MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1000,7 +1083,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1011,7 +1095,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1022,7 +1107,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1033,7 +1119,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1044,7 +1131,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1055,7 +1143,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1066,7 +1155,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1077,7 +1167,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1088,7 +1179,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1099,7 +1191,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1110,7 +1203,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1121,7 +1215,8 @@ "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1132,7 +1227,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1143,7 +1239,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1154,7 +1251,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", 
+ "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1165,7 +1263,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1176,7 +1275,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1187,7 +1287,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1198,7 +1299,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1209,7 +1311,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1220,7 +1323,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1231,7 +1335,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1242,7 +1347,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1253,7 +1359,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1264,7 +1371,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -1275,7 +1383,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/rna/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null } ] } diff --git a/migtests/tests/pg/sakila/expected_files/expectedAssessmentReport.json 
b/migtests/tests/pg/sakila/expected_files/expectedAssessmentReport.json index 14301a9481..8ff126cd0f 100755 --- a/migtests/tests/pg/sakila/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/sakila/expected_files/expectedAssessmentReport.json @@ -126,28 +126,9 @@ "SqlStatement": "CREATE INDEX film_fulltext_idx ON public.film USING gist (fulltext);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" - }, - { - "FeatureName": "BRIN indexes", - "Objects": [] - }, - { - "FeatureName": "SPGIST indexes", - "Objects": [] - }, - { - "FeatureName": "Constraint triggers", - "Objects": [] - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, - { - "FeatureName": "BEFORE ROW triggers on Partitioned tables", - "Objects": [] - }, + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null + }, { "FeatureName": "Inherited tables", "Objects": [ @@ -176,60 +157,9 @@ "SqlStatement": "CREATE TABLE public.payment_p2007_06 (\n CONSTRAINT payment_p2007_06_payment_date_check CHECK (((payment_date \u003e= '2007-06-01 00:00:00'::timestamp without time zone) AND (payment_date \u003c '2007-07-01 00:00:00'::timestamp without time zone)))\n)\nINHERITS (public.payment);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" - }, - { - "FeatureName": "Tables with stored generated columns", - "Objects": [] - }, - { - "FeatureName": "Conversion objects", - "Objects": [] - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - "FeatureName": "Disabling rule on table", - "Objects": [] - }, - { - "FeatureName": "Clustering table on index", - "Objects": [] - }, - { 
- "FeatureName": "Storage parameters in DDLs", - "Objects": [] - }, - { - "FeatureName": "Extensions", - "Objects": [] - }, - { - "FeatureName": "Exclusion constraints", - "Objects": [] - }, - { - "FeatureName": "Deferrable constraints", - "Objects": [] - }, - { - "FeatureName": "View with check option", - "Objects": [] - }, - { - "FeatureName": "Index on complex datatypes", - "Objects": [] - }, - { - "FeatureName": "Unlogged tables", - "Objects": [] - } + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null + } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ @@ -936,21 +866,6 @@ ], "Notes": null, "MigrationCaveats": [ - { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [], - "FeatureDescription": "After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." - }, - { - "FeatureName": "Foreign tables", - "Objects": [], - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." - }, - { - "FeatureName": "Policies", - "Objects": [], - "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema." 
- }, { "FeatureName": "Unsupported Data Types for Live Migration with Fall-forward/Fallback", "Objects": [ @@ -960,8 +875,10 @@ } ], "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", - "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. These columns will be excluded when exporting and importing data in live migration workflows." + "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. These columns will be excluded when exporting and importing data in live migration workflows.", + "MinimumVersionsFixedIn": null } ], - "UnsupportedQueryConstructs": null + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/pg/sakila/expected_files/expected_schema_analysis_report.json b/migtests/tests/pg/sakila/expected_files/expected_schema_analysis_report.json index d25c01a6f4..b2f01e9109 100755 --- a/migtests/tests/pg/sakila/expected_files/expected_schema_analysis_report.json +++ b/migtests/tests/pg/sakila/expected_files/expected_schema_analysis_report.json @@ -1,6 +1,5 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "HIGH", "Summary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "pg_sakila", @@ -87,7 +86,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/sakila/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -98,7 +98,8 @@ 
"FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/sakila/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -109,7 +110,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/sakila/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -120,7 +122,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/sakila/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -131,7 +134,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/sakila/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -142,7 +146,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/sakila/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -153,7 +158,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/sakila/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1129", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", @@ -164,7 +170,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/sakila/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "", "GH": "https://github.com/YugaByte/yugabyte-db/issues/1337", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", + "MinimumVersionsFixedIn": null } ] } diff --git a/migtests/tests/pg/sample-is/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/sample-is/expected_files/expectedAssessmentReport.json index 7a73767908..47797ad8d4 100755 --- 
a/migtests/tests/pg/sample-is/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/sample-is/expected_files/expectedAssessmentReport.json @@ -76,74 +76,6 @@ "UnsupportedDataTypes": null, "UnsupportedDataTypesDesc": "Data types of the source database that are not supported on the target YugabyteDB.", "UnsupportedFeatures": [ - { - "FeatureName": "GIST indexes", - "Objects": [] - }, - { - "FeatureName": "BRIN indexes", - "Objects": [] - }, - { - "FeatureName": "SPGIST indexes", - "Objects": [] - }, - { - "FeatureName": "Constraint triggers", - "Objects": [] - }, - { - "FeatureName": "Inherited tables", - "Objects": [] - }, - { - "FeatureName": "Tables with stored generated columns", - "Objects": [] - }, - { - "FeatureName": "Conversion objects", - "Objects": [] - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - "FeatureName": "Disabling rule on table", - "Objects": [] - }, - { - "FeatureName": "Clustering table on index", - "Objects": [] - }, - { - "FeatureName": "Storage parameters in DDLs", - "Objects": [] - }, - { - "FeatureName": "Extensions", - "Objects": [] - }, - { - "FeatureName": "Index on complex datatypes", - "Objects": [] - }, - { - "FeatureName": "Unlogged tables", - "Objects": [] - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, - { - "FeatureName": "BEFORE ROW triggers on Partitioned tables", - "Objects": [] - }, { "FeatureName": "Exclusion constraints", "Objects": [ @@ -152,16 +84,9 @@ "SqlStatement": "ALTER TABLE ONLY public.secret_missions\n ADD CONSTRAINT cnt_solo_agent EXCLUDE USING gist (location WITH =, mission_timeline WITH \u0026\u0026);" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported" - }, - { - "FeatureName": "Deferrable constraints", - "Objects": [] - }, - { - "FeatureName": "View with check 
option", - "Objects": [] - } + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported", + "MinimumVersionsFixedIn": null + } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ @@ -336,21 +261,6 @@ ], "Notes": null, "MigrationCaveats": [ - { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [], - "FeatureDescription": "After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." - }, - { - "FeatureName": "Foreign tables", - "Objects": [], - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." - }, - { - "FeatureName": "Policies", - "Objects": [], - "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema." - }, { "FeatureName": "Unsupported Data Types for Live Migration with Fall-forward/Fallback", "Objects": [ @@ -360,8 +270,10 @@ } ], "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", - "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. These columns will be excluded when exporting and importing data in live migration workflows." + "FeatureDescription": "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. 
These columns will be excluded when exporting and importing data in live migration workflows.", + "MinimumVersionsFixedIn": null } ], - "UnsupportedQueryConstructs": null + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/pg/sample-is/expected_files/expected_schema_analysis_report.json b/migtests/tests/pg/sample-is/expected_files/expected_schema_analysis_report.json index 0b6403a615..b1f90fab0d 100755 --- a/migtests/tests/pg/sample-is/expected_files/expected_schema_analysis_report.json +++ b/migtests/tests/pg/sample-is/expected_files/expected_schema_analysis_report.json @@ -1,6 +1,5 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "LOW", "Summary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "test_is", @@ -58,7 +57,8 @@ "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/sample-is/export-dir/schema/tables/table.sql", "Suggestion": "Refer docs link for details on possible workaround", "GH": "https://github.com/yugabyte/yugabyte-db/issues/3944", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported", + "MinimumVersionsFixedIn": null }, { "IssueType": "migration_caveats", @@ -69,7 +69,8 @@ "FilePath": "/Users/priyanshigupta/Documents/voyager/yb-voyager/migtests/tests/pg/sample-is/export-dir/schema/tables/table.sql", "Suggestion": "", "GH": "https://github.com/yugabyte/yb-voyager/issues/1731", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", + "MinimumVersionsFixedIn": null } ] } diff --git 
a/migtests/tests/pg/stackexchange/expected_files/expectedAssessmentReport.json b/migtests/tests/pg/stackexchange/expected_files/expectedAssessmentReport.json index 5bd7fd9efd..34c9ea429c 100644 --- a/migtests/tests/pg/stackexchange/expected_files/expectedAssessmentReport.json +++ b/migtests/tests/pg/stackexchange/expected_files/expectedAssessmentReport.json @@ -82,264 +82,189 @@ "UnsupportedDataTypes": null, "UnsupportedDataTypesDesc": "Data types of the source database that are not supported on the target YugabyteDB.", "UnsupportedFeatures": [ - { - "FeatureName": "GIST indexes", - "Objects": [] - }, - { - "FeatureName": "BRIN indexes", - "Objects": [] - }, - { - "FeatureName": "SPGIST indexes", - "Objects": [] - }, - { - "FeatureName": "Constraint triggers", - "Objects": [] - }, - { - "FeatureName": "Inherited tables", - "Objects": [] - }, - { - "FeatureName": "REFERENCING clause for triggers", - "Objects": [] - }, - { - "FeatureName": "BEFORE ROW triggers on Partitioned tables", - "Objects": [] - }, - { - "FeatureName": "Tables with stored generated columns", - "Objects": [] - }, - { - "FeatureName": "Conversion objects", - "Objects": [] - }, - { - "FeatureName": "Gin indexes on multi-columns", - "Objects": [] - }, - { - "FeatureName": "Setting attribute=value on column", - "Objects": [] - }, - { - "FeatureName": "Disabling rule on table", - "Objects": [] - }, - { - "FeatureName": "Clustering table on index", - "Objects": [] - }, { "FeatureName": "Storage parameters in DDLs", "Objects": [ { - "ObjectName": "badges_date_idx", - "SqlStatement": "CREATE INDEX badges_date_idx ON public.badges USING btree (date) WITH (fillfactor='100'); " + "ObjectName": "badges_date_idx ON public.badges", + "SqlStatement": "CREATE INDEX badges_date_idx ON public.badges USING btree (date) WITH (fillfactor='100');" }, { - "ObjectName": "badges_name_idx", - "SqlStatement": "CREATE INDEX badges_name_idx ON public.badges USING btree (name) WITH (fillfactor='100'); " + "ObjectName": 
"badges_name_idx ON public.badges", + "SqlStatement": "CREATE INDEX badges_name_idx ON public.badges USING btree (name) WITH (fillfactor='100');" }, { - "ObjectName": "badges_user_id_idx", - "SqlStatement": "CREATE INDEX badges_user_id_idx ON public.badges USING btree (userid) WITH (fillfactor='100'); " + "ObjectName": "badges_user_id_idx ON public.badges", + "SqlStatement": "CREATE INDEX badges_user_id_idx ON public.badges USING btree (userid) WITH (fillfactor='100');" }, { - "ObjectName": "cmnts_creation_date_idx", - "SqlStatement": "CREATE INDEX cmnts_creation_date_idx ON public.comments USING btree (creationdate) WITH (fillfactor='100'); " + "ObjectName": "cmnts_creation_date_idx ON public.comments", + "SqlStatement": "CREATE INDEX cmnts_creation_date_idx ON public.comments USING btree (creationdate) WITH (fillfactor='100');" }, { - "ObjectName": "cmnts_postid_idx", - "SqlStatement": "CREATE INDEX cmnts_postid_idx ON public.comments USING hash (postid) WITH (fillfactor='100'); " + "ObjectName": "cmnts_postid_idx ON public.comments", + "SqlStatement": "CREATE INDEX cmnts_postid_idx ON public.comments USING hash (postid) WITH (fillfactor='100');" }, { - "ObjectName": "cmnts_score_idx", - "SqlStatement": "CREATE INDEX cmnts_score_idx ON public.comments USING btree (score) WITH (fillfactor='100'); " + "ObjectName": "cmnts_score_idx ON public.comments", + "SqlStatement": "CREATE INDEX cmnts_score_idx ON public.comments USING btree (score) WITH (fillfactor='100');" }, { - "ObjectName": "cmnts_userid_idx", - "SqlStatement": "CREATE INDEX cmnts_userid_idx ON public.comments USING btree (userid) WITH (fillfactor='100'); " + "ObjectName": "cmnts_userid_idx ON public.comments", + "SqlStatement": "CREATE INDEX cmnts_userid_idx ON public.comments USING btree (userid) WITH (fillfactor='100');" }, { - "ObjectName": "ph_creation_date_idx", - "SqlStatement": "CREATE INDEX ph_creation_date_idx ON public.posthistory USING btree (creationdate) WITH (fillfactor='100'); " + 
"ObjectName": "ph_creation_date_idx ON public.posthistory", + "SqlStatement": "CREATE INDEX ph_creation_date_idx ON public.posthistory USING btree (creationdate) WITH (fillfactor='100');" }, { - "ObjectName": "ph_post_type_id_idx", - "SqlStatement": "CREATE INDEX ph_post_type_id_idx ON public.posthistory USING btree (posthistorytypeid) WITH (fillfactor='100'); " + "ObjectName": "ph_post_type_id_idx ON public.posthistory", + "SqlStatement": "CREATE INDEX ph_post_type_id_idx ON public.posthistory USING btree (posthistorytypeid) WITH (fillfactor='100');" }, { - "ObjectName": "ph_postid_idx", - "SqlStatement": "CREATE INDEX ph_postid_idx ON public.posthistory USING hash (postid) WITH (fillfactor='100'); " + "ObjectName": "ph_postid_idx ON public.posthistory", + "SqlStatement": "CREATE INDEX ph_postid_idx ON public.posthistory USING hash (postid) WITH (fillfactor='100');" }, { - "ObjectName": "ph_revguid_idx", - "SqlStatement": "CREATE INDEX ph_revguid_idx ON public.posthistory USING btree (revisionguid) WITH (fillfactor='100'); " + "ObjectName": "ph_revguid_idx ON public.posthistory", + "SqlStatement": "CREATE INDEX ph_revguid_idx ON public.posthistory USING btree (revisionguid) WITH (fillfactor='100');" }, { - "ObjectName": "ph_userid_idx", - "SqlStatement": "CREATE INDEX ph_userid_idx ON public.posthistory USING btree (userid) WITH (fillfactor='100'); " + "ObjectName": "ph_userid_idx ON public.posthistory", + "SqlStatement": "CREATE INDEX ph_userid_idx ON public.posthistory USING btree (userid) WITH (fillfactor='100');" }, { - "ObjectName": "postlinks_post_id_idx", - "SqlStatement": "CREATE INDEX postlinks_post_id_idx ON public.postlinks USING btree (postid) WITH (fillfactor='100'); " + "ObjectName": "postlinks_post_id_idx ON public.postlinks", + "SqlStatement": "CREATE INDEX postlinks_post_id_idx ON public.postlinks USING btree (postid) WITH (fillfactor='100');" }, { - "ObjectName": "postlinks_related_post_id_idx", - "SqlStatement": "CREATE INDEX 
postlinks_related_post_id_idx ON public.postlinks USING btree (relatedpostid) WITH (fillfactor='100'); " + "ObjectName": "postlinks_related_post_id_idx ON public.postlinks", + "SqlStatement": "CREATE INDEX postlinks_related_post_id_idx ON public.postlinks USING btree (relatedpostid) WITH (fillfactor='100');" }, { - "ObjectName": "posts_accepted_answer_id_idx", - "SqlStatement": "CREATE INDEX posts_accepted_answer_id_idx ON public.posts USING btree (acceptedanswerid) WITH (fillfactor='100'); " + "ObjectName": "posts_accepted_answer_id_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_accepted_answer_id_idx ON public.posts USING btree (acceptedanswerid) WITH (fillfactor='100');" }, { - "ObjectName": "posts_answer_count_idx", - "SqlStatement": "CREATE INDEX posts_answer_count_idx ON public.posts USING btree (answercount) WITH (fillfactor='100'); " + "ObjectName": "posts_answer_count_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_answer_count_idx ON public.posts USING btree (answercount) WITH (fillfactor='100');" }, { - "ObjectName": "posts_comment_count_idx", - "SqlStatement": "CREATE INDEX posts_comment_count_idx ON public.posts USING btree (commentcount) WITH (fillfactor='100'); " + "ObjectName": "posts_comment_count_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_comment_count_idx ON public.posts USING btree (commentcount) WITH (fillfactor='100');" }, { - "ObjectName": "posts_creation_date_idx", - "SqlStatement": "CREATE INDEX posts_creation_date_idx ON public.posts USING btree (creationdate) WITH (fillfactor='100'); " + "ObjectName": "posts_creation_date_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_creation_date_idx ON public.posts USING btree (creationdate) WITH (fillfactor='100');" }, { - "ObjectName": "posts_favorite_count_idx", - "SqlStatement": "CREATE INDEX posts_favorite_count_idx ON public.posts USING btree (favoritecount) WITH (fillfactor='100'); " + "ObjectName": "posts_favorite_count_idx ON 
public.posts", + "SqlStatement": "CREATE INDEX posts_favorite_count_idx ON public.posts USING btree (favoritecount) WITH (fillfactor='100');" }, { - "ObjectName": "posts_id_accepted_answers_id_idx", - "SqlStatement": "CREATE INDEX posts_id_accepted_answers_id_idx ON public.posts USING btree (id, acceptedanswerid) WITH (fillfactor='100'); " + "ObjectName": "posts_id_accepted_answers_id_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_id_accepted_answers_id_idx ON public.posts USING btree (id, acceptedanswerid) WITH (fillfactor='100');" }, { - "ObjectName": "posts_id_parent_id_idx", - "SqlStatement": "CREATE INDEX posts_id_parent_id_idx ON public.posts USING btree (id, parentid) WITH (fillfactor='100'); " + "ObjectName": "posts_id_parent_id_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_id_parent_id_idx ON public.posts USING btree (id, parentid) WITH (fillfactor='100');" }, { - "ObjectName": "posts_id_post_type_id_idx", - "SqlStatement": "CREATE INDEX posts_id_post_type_id_idx ON public.posts USING btree (id, posttypeid) WITH (fillfactor='100'); " + "ObjectName": "posts_id_post_type_id_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_id_post_type_id_idx ON public.posts USING btree (id, posttypeid) WITH (fillfactor='100');" }, { - "ObjectName": "posts_owner_user_id_creation_date_idx", - "SqlStatement": "CREATE INDEX posts_owner_user_id_creation_date_idx ON public.posts USING btree (owneruserid, creationdate) WITH (fillfactor='100'); " + "ObjectName": "posts_owner_user_id_creation_date_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_owner_user_id_creation_date_idx ON public.posts USING btree (owneruserid, creationdate) WITH (fillfactor='100');" }, { - "ObjectName": "posts_owner_user_id_idx", - "SqlStatement": "CREATE INDEX posts_owner_user_id_idx ON public.posts USING hash (owneruserid) WITH (fillfactor='100'); " + "ObjectName": "posts_owner_user_id_idx ON public.posts", + "SqlStatement": "CREATE INDEX 
posts_owner_user_id_idx ON public.posts USING hash (owneruserid) WITH (fillfactor='100');" }, { - "ObjectName": "posts_parent_id_idx", - "SqlStatement": "CREATE INDEX posts_parent_id_idx ON public.posts USING btree (parentid) WITH (fillfactor='100'); " + "ObjectName": "posts_parent_id_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_parent_id_idx ON public.posts USING btree (parentid) WITH (fillfactor='100');" }, { - "ObjectName": "posts_post_type_id_idx", - "SqlStatement": "CREATE INDEX posts_post_type_id_idx ON public.posts USING btree (posttypeid) WITH (fillfactor='100'); " + "ObjectName": "posts_post_type_id_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_post_type_id_idx ON public.posts USING btree (posttypeid) WITH (fillfactor='100');" }, { - "ObjectName": "posts_score_idx", - "SqlStatement": "CREATE INDEX posts_score_idx ON public.posts USING btree (score) WITH (fillfactor='100'); " + "ObjectName": "posts_score_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_score_idx ON public.posts USING btree (score) WITH (fillfactor='100');" }, { - "ObjectName": "posts_viewcount_idx", - "SqlStatement": "CREATE INDEX posts_viewcount_idx ON public.posts USING btree (viewcount) WITH (fillfactor='100'); " + "ObjectName": "posts_viewcount_idx ON public.posts", + "SqlStatement": "CREATE INDEX posts_viewcount_idx ON public.posts USING btree (viewcount) WITH (fillfactor='100');" }, { - "ObjectName": "posttags_postid_idx", - "SqlStatement": "CREATE INDEX posttags_postid_idx ON public.posttags USING hash (postid) WITH (fillfactor='100'); " + "ObjectName": "posttags_postid_idx ON public.posttags", + "SqlStatement": "CREATE INDEX posttags_postid_idx ON public.posttags USING hash (postid) WITH (fillfactor='100');" }, { - "ObjectName": "posttags_tagid_idx", - "SqlStatement": "CREATE INDEX posttags_tagid_idx ON public.posttags USING btree (tagid) WITH (fillfactor='100'); " + "ObjectName": "posttags_tagid_idx ON public.posttags", + "SqlStatement": 
"CREATE INDEX posttags_tagid_idx ON public.posttags USING btree (tagid) WITH (fillfactor='100');" }, { - "ObjectName": "tags_count_idx", - "SqlStatement": "CREATE INDEX tags_count_idx ON public.tags USING btree (count) WITH (fillfactor='100'); " + "ObjectName": "tags_count_idx ON public.tags", + "SqlStatement": "CREATE INDEX tags_count_idx ON public.tags USING btree (count) WITH (fillfactor='100');" }, { - "ObjectName": "tags_name_idx", - "SqlStatement": "CREATE INDEX tags_name_idx ON public.tags USING hash (tagname) WITH (fillfactor='100'); " + "ObjectName": "tags_name_idx ON public.tags", + "SqlStatement": "CREATE INDEX tags_name_idx ON public.tags USING hash (tagname) WITH (fillfactor='100');" }, { - "ObjectName": "user_acc_id_idx", - "SqlStatement": "CREATE INDEX user_acc_id_idx ON public.users USING hash (accountid) WITH (fillfactor='100'); " + "ObjectName": "user_acc_id_idx ON public.users", + "SqlStatement": "CREATE INDEX user_acc_id_idx ON public.users USING hash (accountid) WITH (fillfactor='100');" }, { - "ObjectName": "user_created_at_idx", - "SqlStatement": "CREATE INDEX user_created_at_idx ON public.users USING btree (creationdate) WITH (fillfactor='100'); " + "ObjectName": "user_created_at_idx ON public.users", + "SqlStatement": "CREATE INDEX user_created_at_idx ON public.users USING btree (creationdate) WITH (fillfactor='100');" }, { - "ObjectName": "user_display_idx", - "SqlStatement": "CREATE INDEX user_display_idx ON public.users USING hash (displayname) WITH (fillfactor='100'); " + "ObjectName": "user_display_idx ON public.users", + "SqlStatement": "CREATE INDEX user_display_idx ON public.users USING hash (displayname) WITH (fillfactor='100');" }, { - "ObjectName": "user_down_votes_idx", - "SqlStatement": "CREATE INDEX user_down_votes_idx ON public.users USING btree (downvotes) WITH (fillfactor='100'); " + "ObjectName": "user_down_votes_idx ON public.users", + "SqlStatement": "CREATE INDEX user_down_votes_idx ON public.users USING btree 
(downvotes) WITH (fillfactor='100');" }, { - "ObjectName": "user_up_votes_idx", - "SqlStatement": "CREATE INDEX user_up_votes_idx ON public.users USING btree (upvotes) WITH (fillfactor='100'); " + "ObjectName": "user_up_votes_idx ON public.users", + "SqlStatement": "CREATE INDEX user_up_votes_idx ON public.users USING btree (upvotes) WITH (fillfactor='100');" }, { - "ObjectName": "usertagqa_all_qa_posts_idx", - "SqlStatement": "CREATE INDEX usertagqa_all_qa_posts_idx ON public.usertagqa USING btree (((questions + answers))) WITH (fillfactor='100'); " + "ObjectName": "usertagqa_all_qa_posts_idx ON public.usertagqa", + "SqlStatement": "CREATE INDEX usertagqa_all_qa_posts_idx ON public.usertagqa USING btree (((questions + answers))) WITH (fillfactor='100');" }, { - "ObjectName": "usertagqa_answers_idx", - "SqlStatement": "CREATE INDEX usertagqa_answers_idx ON public.usertagqa USING btree (answers) WITH (fillfactor='100'); " + "ObjectName": "usertagqa_answers_idx ON public.usertagqa", + "SqlStatement": "CREATE INDEX usertagqa_answers_idx ON public.usertagqa USING btree (answers) WITH (fillfactor='100');" }, { - "ObjectName": "usertagqa_questions_answers_idx", - "SqlStatement": "CREATE INDEX usertagqa_questions_answers_idx ON public.usertagqa USING btree (questions, answers) WITH (fillfactor='100'); " + "ObjectName": "usertagqa_questions_answers_idx ON public.usertagqa", + "SqlStatement": "CREATE INDEX usertagqa_questions_answers_idx ON public.usertagqa USING btree (questions, answers) WITH (fillfactor='100');" }, { - "ObjectName": "usertagqa_questions_idx", - "SqlStatement": "CREATE INDEX usertagqa_questions_idx ON public.usertagqa USING btree (questions) WITH (fillfactor='100'); " + "ObjectName": "usertagqa_questions_idx ON public.usertagqa", + "SqlStatement": "CREATE INDEX usertagqa_questions_idx ON public.usertagqa USING btree (questions) WITH (fillfactor='100');" }, { - "ObjectName": "votes_creation_date_idx", - "SqlStatement": "CREATE INDEX votes_creation_date_idx 
ON public.votes USING btree (creationdate) WITH (fillfactor='100'); " + "ObjectName": "votes_creation_date_idx ON public.votes", + "SqlStatement": "CREATE INDEX votes_creation_date_idx ON public.votes USING btree (creationdate) WITH (fillfactor='100');" }, { - "ObjectName": "votes_post_id_idx", - "SqlStatement": "CREATE INDEX votes_post_id_idx ON public.votes USING hash (postid) WITH (fillfactor='100'); " + "ObjectName": "votes_post_id_idx ON public.votes", + "SqlStatement": "CREATE INDEX votes_post_id_idx ON public.votes USING hash (postid) WITH (fillfactor='100');" }, { - "ObjectName": "votes_type_idx", - "SqlStatement": "CREATE INDEX votes_type_idx ON public.votes USING btree (votetypeid) WITH (fillfactor='100'); " + "ObjectName": "votes_type_idx ON public.votes", + "SqlStatement": "CREATE INDEX votes_type_idx ON public.votes USING btree (votetypeid) WITH (fillfactor='100');" } ], - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" - }, - { - "FeatureName": "Extensions", - "Objects": [] - }, - { - "FeatureName": "Exclusion constraints", - "Objects": [] - }, - { - "FeatureName": "Deferrable constraints", - "Objects": [] - }, - { - "FeatureName": "View with check option", - "Objects": [] - }, - { - "FeatureName": "Index on complex datatypes", - "Objects": [] - }, - { - "FeatureName": "Unlogged tables", - "Objects": [] - } + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null + } ], "UnsupportedFeaturesDesc": "Features of the source database that are not supported on the target YugabyteDB.", "TableIndexStats": [ @@ -1255,22 +1180,7 @@ } ], "Notes": null, - "MigrationCaveats": [ - { - "FeatureName": "Alter partitioned tables to add Primary Key", - "Objects": [], - "FeatureDescription": "After export schema, the ALTER 
table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported." - }, - { - "FeatureName": "Foreign tables", - "Objects": [], - "FeatureDescription": "During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work." - }, - { - "FeatureName": "Policies", - "Objects": [], - "FeatureDescription": "There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema." - } - ], - "UnsupportedQueryConstructs": null + "MigrationCaveats": null, + "UnsupportedQueryConstructs": null, + "UnsupportedPlPgSqlObjects": null } diff --git a/migtests/tests/pg/stackexchange/expected_files/expected_schema_analysis_report.json b/migtests/tests/pg/stackexchange/expected_files/expected_schema_analysis_report.json index 25bc7473d7..ca3d4ac3ce 100644 --- a/migtests/tests/pg/stackexchange/expected_files/expected_schema_analysis_report.json +++ b/migtests/tests/pg/stackexchange/expected_files/expected_schema_analysis_report.json @@ -1,6 +1,5 @@ { "VoyagerVersion": "IGNORED", - "MigrationComplexity": "MEDIUM", "Summary": { "Description": "Objects that will be created on the target YugabyteDB.", "DbName": "test_stackex", @@ -45,486 +44,530 @@ { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "badges_date_idx", + "ObjectName": "badges_date_idx ON public.badges", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX badges_date_idx ON public.badges USING btree (date) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX badges_date_idx ON public.badges USING btree (date) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the 
storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "badges_name_idx", + "ObjectName": "badges_name_idx ON public.badges", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX badges_name_idx ON public.badges USING btree (name) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX badges_name_idx ON public.badges USING btree (name) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "badges_user_id_idx", + "ObjectName": "badges_user_id_idx ON public.badges", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX badges_user_id_idx ON public.badges USING btree (userid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX badges_user_id_idx ON public.badges USING btree (userid) WITH (fillfactor='100');", "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "cmnts_creation_date_idx", + "ObjectName": "cmnts_creation_date_idx ON public.comments", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX cmnts_creation_date_idx ON public.comments USING btree (creationdate) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX cmnts_creation_date_idx ON public.comments USING btree (creationdate) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "cmnts_postid_idx", + "ObjectName": "cmnts_postid_idx ON public.comments", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX cmnts_postid_idx ON public.comments USING hash (postid) WITH (fillfactor='100'); ", + 
"SqlStatement": "CREATE INDEX cmnts_postid_idx ON public.comments USING hash (postid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "cmnts_score_idx", + "ObjectName": "cmnts_score_idx ON public.comments", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX cmnts_score_idx ON public.comments USING btree (score) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX cmnts_score_idx ON public.comments USING btree (score) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "cmnts_userid_idx", + "ObjectName": "cmnts_userid_idx ON public.comments", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX cmnts_userid_idx 
ON public.comments USING btree (userid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX cmnts_userid_idx ON public.comments USING btree (userid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "ph_creation_date_idx", + "ObjectName": "ph_creation_date_idx ON public.posthistory", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX ph_creation_date_idx ON public.posthistory USING btree (creationdate) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX ph_creation_date_idx ON public.posthistory USING btree (creationdate) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "ph_post_type_id_idx", + "ObjectName": "ph_post_type_id_idx ON 
public.posthistory", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX ph_post_type_id_idx ON public.posthistory USING btree (posthistorytypeid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX ph_post_type_id_idx ON public.posthistory USING btree (posthistorytypeid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "ph_postid_idx", + "ObjectName": "ph_postid_idx ON public.posthistory", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX ph_postid_idx ON public.posthistory USING hash (postid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX ph_postid_idx ON public.posthistory USING hash (postid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", "ObjectType": "INDEX", - "ObjectName": "ph_revguid_idx", + "ObjectName": "ph_revguid_idx ON public.posthistory", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX ph_revguid_idx ON public.posthistory USING btree (revisionguid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX ph_revguid_idx ON public.posthistory USING btree (revisionguid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "ph_userid_idx", + "ObjectName": "ph_userid_idx ON public.posthistory", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX ph_userid_idx ON public.posthistory USING btree (userid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX ph_userid_idx ON public.posthistory USING btree (userid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "postlinks_post_id_idx", + "ObjectName": "postlinks_post_id_idx ON public.postlinks", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX postlinks_post_id_idx ON public.postlinks USING btree (postid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX postlinks_post_id_idx ON public.postlinks USING btree (postid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "postlinks_related_post_id_idx", + "ObjectName": "postlinks_related_post_id_idx ON public.postlinks", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX postlinks_related_post_id_idx ON public.postlinks USING btree (relatedpostid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX postlinks_related_post_id_idx ON public.postlinks USING btree (relatedpostid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - 
"DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_accepted_answer_id_idx", + "ObjectName": "posts_accepted_answer_id_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_accepted_answer_id_idx ON public.posts USING btree (acceptedanswerid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_accepted_answer_id_idx ON public.posts USING btree (acceptedanswerid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_answer_count_idx", + "ObjectName": "posts_answer_count_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_answer_count_idx ON public.posts USING btree (answercount) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_answer_count_idx ON public.posts USING btree (answercount) WITH (fillfactor='100');", "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_comment_count_idx", + "ObjectName": "posts_comment_count_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_comment_count_idx ON public.posts USING btree (commentcount) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_comment_count_idx ON public.posts USING btree (commentcount) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_creation_date_idx", + "ObjectName": "posts_creation_date_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_creation_date_idx ON public.posts USING btree (creationdate) WITH (fillfactor='100'); ", 
+ "SqlStatement": "CREATE INDEX posts_creation_date_idx ON public.posts USING btree (creationdate) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_favorite_count_idx", + "ObjectName": "posts_favorite_count_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_favorite_count_idx ON public.posts USING btree (favoritecount) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_favorite_count_idx ON public.posts USING btree (favoritecount) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_id_accepted_answers_id_idx", + "ObjectName": "posts_id_accepted_answers_id_idx ON public.posts", "Reason": "Storage 
parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_id_accepted_answers_id_idx ON public.posts USING btree (id, acceptedanswerid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_id_accepted_answers_id_idx ON public.posts USING btree (id, acceptedanswerid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_id_parent_id_idx", + "ObjectName": "posts_id_parent_id_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_id_parent_id_idx ON public.posts USING btree (id, parentid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_id_parent_id_idx ON public.posts USING btree (id, parentid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": 
"unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_id_post_type_id_idx", + "ObjectName": "posts_id_post_type_id_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_id_post_type_id_idx ON public.posts USING btree (id, posttypeid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_id_post_type_id_idx ON public.posts USING btree (id, posttypeid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_owner_user_id_creation_date_idx", + "ObjectName": "posts_owner_user_id_creation_date_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_owner_user_id_creation_date_idx ON public.posts USING btree (owneruserid, creationdate) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_owner_user_id_creation_date_idx ON public.posts USING btree (owneruserid, creationdate) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_owner_user_id_idx", + "ObjectName": "posts_owner_user_id_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_owner_user_id_idx ON public.posts USING hash (owneruserid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_owner_user_id_idx ON public.posts USING hash (owneruserid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_parent_id_idx", + "ObjectName": "posts_parent_id_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_parent_id_idx ON public.posts USING btree (parentid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_parent_id_idx ON public.posts USING btree (parentid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the 
storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_post_type_id_idx", + "ObjectName": "posts_post_type_id_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_post_type_id_idx ON public.posts USING btree (posttypeid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_post_type_id_idx ON public.posts USING btree (posttypeid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_score_idx", + "ObjectName": "posts_score_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_score_idx ON public.posts USING btree (score) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_score_idx ON public.posts USING btree (score) WITH (fillfactor='100');", "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posts_viewcount_idx", + "ObjectName": "posts_viewcount_idx ON public.posts", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posts_viewcount_idx ON public.posts USING btree (viewcount) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posts_viewcount_idx ON public.posts USING btree (viewcount) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posttags_postid_idx", + "ObjectName": "posttags_postid_idx ON public.posttags", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posttags_postid_idx ON public.posttags USING hash (postid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX 
posttags_postid_idx ON public.posttags USING hash (postid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "posttags_tagid_idx", + "ObjectName": "posttags_tagid_idx ON public.posttags", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX posttags_tagid_idx ON public.posttags USING btree (tagid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX posttags_tagid_idx ON public.posttags USING btree (tagid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "tags_count_idx", + "ObjectName": "tags_count_idx ON public.tags", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX tags_count_idx ON public.tags USING 
btree (count) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX tags_count_idx ON public.tags USING btree (count) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "tags_name_idx", + "ObjectName": "tags_name_idx ON public.tags", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX tags_name_idx ON public.tags USING hash (tagname) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX tags_name_idx ON public.tags USING hash (tagname) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "user_acc_id_idx", + "ObjectName": "user_acc_id_idx ON public.users", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX 
user_acc_id_idx ON public.users USING hash (accountid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX user_acc_id_idx ON public.users USING hash (accountid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "user_created_at_idx", + "ObjectName": "user_created_at_idx ON public.users", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX user_created_at_idx ON public.users USING btree (creationdate) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX user_created_at_idx ON public.users USING btree (creationdate) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "user_display_idx", + "ObjectName": "user_display_idx ON public.users", 
"Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX user_display_idx ON public.users USING hash (displayname) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX user_display_idx ON public.users USING hash (displayname) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "user_down_votes_idx", + "ObjectName": "user_down_votes_idx ON public.users", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX user_down_votes_idx ON public.users USING btree (downvotes) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX user_down_votes_idx ON public.users USING btree (downvotes) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - 
"ObjectName": "user_up_votes_idx", + "ObjectName": "user_up_votes_idx ON public.users", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX user_up_votes_idx ON public.users USING btree (upvotes) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX user_up_votes_idx ON public.users USING btree (upvotes) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "usertagqa_all_qa_posts_idx", + "ObjectName": "usertagqa_all_qa_posts_idx ON public.usertagqa", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX usertagqa_all_qa_posts_idx ON public.usertagqa USING btree (((questions + answers))) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX usertagqa_all_qa_posts_idx ON public.usertagqa USING btree (((questions + answers))) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "usertagqa_answers_idx", + "ObjectName": "usertagqa_answers_idx ON public.usertagqa", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX usertagqa_answers_idx ON public.usertagqa USING btree (answers) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX usertagqa_answers_idx ON public.usertagqa USING btree (answers) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "usertagqa_questions_answers_idx", + "ObjectName": "usertagqa_questions_answers_idx ON public.usertagqa", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX usertagqa_questions_answers_idx ON public.usertagqa USING btree (questions, answers) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX usertagqa_questions_answers_idx ON public.usertagqa USING btree (questions, answers) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": 
"https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "usertagqa_questions_idx", + "ObjectName": "usertagqa_questions_idx ON public.usertagqa", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX usertagqa_questions_idx ON public.usertagqa USING btree (questions) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX usertagqa_questions_idx ON public.usertagqa USING btree (questions) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "votes_creation_date_idx", + "ObjectName": "votes_creation_date_idx ON public.votes", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX votes_creation_date_idx ON public.votes USING btree (creationdate) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX votes_creation_date_idx ON public.votes USING btree (creationdate) WITH (fillfactor='100');", "FilePath": 
"/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "votes_post_id_idx", + "ObjectName": "votes_post_id_idx ON public.votes", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX votes_post_id_idx ON public.votes USING hash (postid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX votes_post_id_idx ON public.votes USING hash (postid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null }, { "IssueType": "unsupported_features", "ObjectType": "INDEX", - "ObjectName": "votes_type_idx", + "ObjectName": "votes_type_idx ON public.votes", "Reason": "Storage parameters are not supported yet.", - "SqlStatement": "CREATE INDEX votes_type_idx ON public.votes USING btree (votetypeid) WITH (fillfactor='100'); ", + "SqlStatement": "CREATE INDEX votes_type_idx ON public.votes 
USING btree (votetypeid) WITH (fillfactor='100');", "FilePath": "/home/ubuntu/yb-voyager/migtests/tests/pg/stackexchange/export-dir/schema/tables/INDEXES_table.sql", "Suggestion": "Remove the storage parameters from the DDL", "GH": "https://github.com/yugabyte/yugabyte-db/issues/23467", - "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql" + "DocsLink": "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", + "MinimumVersionsFixedIn": null } ] } diff --git a/migtests/tests/pg/unique-key-conflicts-test/env.sh b/migtests/tests/pg/unique-key-conflicts-test/env.sh index f812fc984c..4615850a1d 100644 --- a/migtests/tests/pg/unique-key-conflicts-test/env.sh +++ b/migtests/tests/pg/unique-key-conflicts-test/env.sh @@ -1,4 +1,2 @@ export SOURCE_DB_TYPE="postgresql" -export SOURCE_DB_NAME=${SOURCE_DB_NAME:-"unique_key_conflict_cases"} export SOURCE_DB_SCHEMA="public,non_public" -export SOURCE_REPLICA_DB_NAME="unique_key_conflict_cases_replica" \ No newline at end of file diff --git a/yb-voyager/cmd/analyzeSchema.go b/yb-voyager/cmd/analyzeSchema.go index 2ba76f6a03..fe25825e65 100644 --- a/yb-voyager/cmd/analyzeSchema.go +++ b/yb-voyager/cmd/analyzeSchema.go @@ -27,18 +27,22 @@ import ( "strings" "text/template" - pg_query "github.com/pganalyze/pg_query_go/v5" + pg_query "github.com/pganalyze/pg_query_go/v6" "github.com/samber/lo" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "golang.org/x/exp/slices" "github.com/yugabyte/yb-voyager/yb-voyager/src/callhome" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" "github.com/yugabyte/yb-voyager/yb-voyager/src/cp" "github.com/yugabyte/yb-voyager/yb-voyager/src/metadb" + "github.com/yugabyte/yb-voyager/yb-voyager/src/query/queryissue" + "github.com/yugabyte/yb-voyager/yb-voyager/src/query/queryparser" 
"github.com/yugabyte/yb-voyager/yb-voyager/src/srcdb" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" ) type summaryInfo struct { @@ -126,31 +130,10 @@ var ( schemaAnalysisReport utils.SchemaReport partitionTablesMap = make(map[string]bool) // key is partitioned table, value is sqlInfo (sqlstmt, fpath) where the ADD PRIMARY KEY statement resides - primaryConsInAlter = make(map[string]*sqlInfo) - summaryMap = make(map[string]*summaryInfo) - multiRegex = regexp.MustCompile(`([a-zA-Z0-9_\.]+[,|;])`) - dollarQuoteRegex = regexp.MustCompile(`(\$.*\$)`) - /* - this will contain the information in this format: - public.table1 -> { - column1: citext | jsonb | inet | tsquery | tsvector | array - ... - } - schema2.table2 -> { - column3: citext | jsonb | inet | tsquery | tsvector | array - ... - } - Here only those columns on tables are stored which have unsupported type for Index in YB - */ - columnsWithUnsupportedIndexDatatypes = make(map[string]map[string]string) - /* - list of composite types with fully qualified typename in the exported schema - */ - compositeTypes = make([]string, 0) - /* - list of enum types with fully qualified typename in the exported schema - */ - enumTypes = make([]string, 0) + summaryMap = make(map[string]*summaryInfo) + parserIssueDetector = queryissue.NewParserIssueDetector() + multiRegex = regexp.MustCompile(`([a-zA-Z0-9_\.]+[,|;])`) + dollarQuoteRegex = regexp.MustCompile(`(\$.*\$)`) //TODO: optional but replace every possible space or new line char with [\s\n]+ in all regexs viewWithCheckRegex = re("VIEW", capture(ident), anything, "WITH", opt(commonClause), "CHECK", "OPTION") rangeRegex = re("PRECEDING", "and", anything, ":float") @@ -168,9 +151,7 @@ var ( idxConcRegex = re("REINDEX", anything, capture(ident)) likeAllRegex = re("CREATE", "TABLE", ifNotExists, capture(ident), anything, "LIKE", anything, "INCLUDING 
ALL") likeRegex = re("CREATE", "TABLE", ifNotExists, capture(ident), anything, `\(LIKE`) - inheritRegex = re("CREATE", opt(capture(unqualifiedIdent)), "TABLE", ifNotExists, capture(ident), anything, "INHERITS", "[ |(]") withOidsRegex = re("CREATE", "TABLE", ifNotExists, capture(ident), anything, "WITH", anything, "OIDS") - intvlRegex = re("CREATE", "TABLE", ifNotExists, capture(ident)+`\(`, anything, "interval", "PRIMARY") anydataRegex = re("CREATE", "TABLE", ifNotExists, capture(ident), anything, "AnyData", anything) anydatasetRegex = re("CREATE", "TABLE", ifNotExists, capture(ident), anything, "AnyDataSet", anything) anyTypeRegex = re("CREATE", "TABLE", ifNotExists, capture(ident), anything, "AnyType", anything) @@ -214,41 +195,34 @@ var ( ) const ( - CONVERSION_ISSUE_REASON = "CREATE CONVERSION is not supported yet" - GIN_INDEX_MULTI_COLUMN_ISSUE_REASON = "Schema contains gin index on multi column which is not supported." - ADDING_PK_TO_PARTITIONED_TABLE_ISSUE_REASON = "Adding primary key to a partitioned table is not supported yet." - INHERITANCE_ISSUE_REASON = "TABLE INHERITANCE not supported in YugabyteDB" - CONSTRAINT_TRIGGER_ISSUE_REASON = "CONSTRAINT TRIGGER not supported yet." - REFERENCING_CLAUSE_FOR_TRIGGERS = "REFERENCING clause (transition tables) not supported yet." - BEFORE_FOR_EACH_ROW_TRIGGERS_ON_PARTITIONED_TABLE = "Partitioned tables cannot have BEFORE / FOR EACH ROW triggers." - COMPOUND_TRIGGER_ISSUE_REASON = "COMPOUND TRIGGER not supported in YugabyteDB." - - STORED_GENERATED_COLUMN_ISSUE_REASON = "Stored generated columns are not supported." - UNSUPPORTED_EXTENSION_ISSUE = "This extension is not supported in YugabyteDB by default." - EXCLUSION_CONSTRAINT_ISSUE = "Exclusion constraint is not supported yet" - ALTER_TABLE_DISABLE_RULE_ISSUE = "ALTER TABLE name DISABLE RULE not supported yet" - STORAGE_PARAMETERS_DDL_STMT_ISSUE = "Storage parameters are not supported yet." - ALTER_TABLE_SET_ATTRIBUTE_ISSUE = "ALTER TABLE .. ALTER COLUMN .. 
SET ( attribute = value ) not supported yet" - FOREIGN_TABLE_ISSUE_REASON = "Foreign tables require manual intervention." - ALTER_TABLE_CLUSTER_ON_ISSUE = "ALTER TABLE CLUSTER not supported yet." - DEFERRABLE_CONSTRAINT_ISSUE = "DEFERRABLE constraints not supported yet" - POLICY_ROLE_ISSUE = "Policy require roles to be created." - VIEW_CHECK_OPTION_ISSUE = "Schema containing VIEW WITH CHECK OPTION is not supported yet." - ISSUE_INDEX_WITH_COMPLEX_DATATYPES = `INDEX on column '%s' not yet supported` - ISSUE_UNLOGGED_TABLE = "UNLOGGED tables are not supported yet." + // Issues detected using regexp, reported in assessment and analyze both + CONVERSION_ISSUE_REASON = "CREATE CONVERSION is not supported yet" + UNSUPPORTED_EXTENSION_ISSUE = "This extension is not supported in YugabyteDB by default." + VIEW_CHECK_OPTION_ISSUE = "Schema containing VIEW WITH CHECK OPTION is not supported yet." + + // Refactor: constants below used in some comparisions (use Issue Type there and remove these) + INHERITANCE_ISSUE_REASON = "TABLE INHERITANCE not supported in YugabyteDB" + ADDING_PK_TO_PARTITIONED_TABLE_ISSUE_REASON = "Adding primary key to a partitioned table is not supported yet." + COMPOUND_TRIGGER_ISSUE_REASON = "COMPOUND TRIGGER not supported in YugabyteDB." + STORED_GENERATED_COLUMN_ISSUE_REASON = "Stored generated columns are not supported." + FOREIGN_TABLE_ISSUE_REASON = "Foreign tables require manual intervention." + DEFERRABLE_CONSTRAINT_ISSUE = "DEFERRABLE constraints not supported yet" + POLICY_ROLE_ISSUE = "Policy require roles to be created." + ISSUE_INDEX_WITH_COMPLEX_DATATYPES = `INDEX on column '%s' not yet supported` + INDEX_METHOD_ISSUE_REASON = "Schema contains %s index which is not supported." 
+ UNSUPPORTED_DATATYPE = "Unsupported datatype" UNSUPPORTED_DATATYPE_LIVE_MIGRATION = "Unsupported datatype for Live migration" UNSUPPORTED_DATATYPE_LIVE_MIGRATION_WITH_FF_FB = "Unsupported datatype for Live migration with fall-forward/fallback" UNSUPPORTED_PG_SYNTAX = "Unsupported PG syntax" - INDEX_METHOD_ISSUE_REASON = "Schema contains %s index which is not supported." INSUFFICIENT_COLUMNS_IN_PK_FOR_PARTITION = "insufficient columns in the PRIMARY KEY constraint definition in CREATE TABLE" GIN_INDEX_DETAILS = "There are some GIN indexes present in the schema, but GIN indexes are partially supported in YugabyteDB as mentioned in (https://github.com/yugabyte/yugabyte-db/issues/7850) so take a look and modify them if not supported." ) // Reports one case in JSON -func reportCase(filePath string, reason string, ghIssue string, suggestion string, objType string, objName string, sqlStmt string, issueType string, docsLink string) { - var issue utils.Issue +func reportCase(filePath string, reason string, ghIssue string, suggestion string, objType string, objName string, sqlStmt string, category string, docsLink string, impact string) { + var issue utils.AnalyzeSchemaIssue issue.FilePath = filePath issue.Reason = reason issue.GH = ghIssue @@ -256,7 +230,8 @@ func reportCase(filePath string, reason string, ghIssue string, suggestion strin issue.ObjectType = objType issue.ObjectName = objName issue.SqlStatement = sqlStmt - issue.IssueType = issueType + issue.IssueType = category // IssueType field of analyze schema should be renamed to Category + issue.Impact = lo.Ternary(impact != "", impact, constants.IMPACT_LEVEL_1) if sourceDBType == POSTGRESQL { issue.DocsLink = docsLink } @@ -266,13 +241,13 @@ func reportCase(filePath string, reason string, ghIssue string, suggestion strin func reportBasedOnComment(comment int, fpath string, issue string, suggestion string, objName string, objType string, line string) { if comment == 1 { - reportCase(fpath, "Unsupported, please edit 
to match PostgreSQL syntax", "https://github.com/yugabyte/yb-voyager/issues/1625", suggestion, objType, objName, line, UNSUPPORTED_FEATURES, "") + reportCase(fpath, "Unsupported, please edit to match PostgreSQL syntax", "https://github.com/yugabyte/yb-voyager/issues/1625", suggestion, objType, objName, line, UNSUPPORTED_FEATURES_CATEGORY, "", "") summaryMap[objType].invalidCount[objName] = true } else if comment == 2 { - // reportCase(fpath, "PACKAGE in oracle are exported as Schema, please review and edit to match PostgreSQL syntax if required, Package is "+objName, issue, suggestion, objType) + // reportCase(fpath, "PACKAGE in oracle are exported as Schema, please review and edit to match PostgreSQL syntax if required, Package is "+objName, issue, suggestion, objType, "") summaryMap["PACKAGE"].objSet = append(summaryMap["PACKAGE"].objSet, objName) } else if comment == 3 { - reportCase(fpath, "SQLs in file might be unsupported please review and edit to match PostgreSQL syntax if required. ", "https://github.com/yugabyte/yb-voyager/issues/1625", suggestion, objType, objName, line, UNSUPPORTED_FEATURES, "") + reportCase(fpath, "SQLs in file might be unsupported please review and edit to match PostgreSQL syntax if required. 
", "https://github.com/yugabyte/yb-voyager/issues/1625", suggestion, objType, objName, line, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if comment == 4 { summaryMap[objType].details["Inherited Types are present which are not supported in PostgreSQL syntax, so exported as Inherited Tables"] = true } @@ -341,1025 +316,41 @@ func addSummaryDetailsForIndexes() { } } -func checkForeignTable(sqlInfoArr []sqlInfo, fpath string) { - for _, sqlStmtInfo := range sqlInfoArr { - parseTree, err := pg_query.Parse(sqlStmtInfo.stmt) - if err != nil { - utils.ErrExit("failed to parse the stmt %v: %v", sqlStmtInfo.stmt, err) - } - createForeignTableNode, isForeignTable := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateForeignTableStmt) - if isForeignTable { - baseStmt := createForeignTableNode.CreateForeignTableStmt.BaseStmt - relation := baseStmt.Relation - schemaName := relation.Schemaname - tableName := relation.Relname - serverName := createForeignTableNode.CreateForeignTableStmt.Servername - summaryMap["FOREIGN TABLE"].invalidCount[sqlStmtInfo.objName] = true - objName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - reportCase(fpath, FOREIGN_TABLE_ISSUE_REASON, "https://github.com/yugabyte/yb-voyager/issues/1627", - fmt.Sprintf("SERVER '%s', and USER MAPPING should be created manually on the target to create and use the foreign table", serverName), "FOREIGN TABLE", objName, sqlStmtInfo.stmt, MIGRATION_CAVEATS, FOREIGN_TABLE_DOC_LINK) - reportUnsupportedDatatypes(relation, baseStmt.TableElts, sqlStmtInfo, fpath, "FOREIGN TABLE") - } - } -} - func checkStmtsUsingParser(sqlInfoArr []sqlInfo, fpath string, objType string) { for _, sqlStmtInfo := range sqlInfoArr { - parseTree, err := pg_query.Parse(sqlStmtInfo.stmt) + _, err := queryparser.Parse(sqlStmtInfo.stmt) if err != nil { //if the Stmt is not already report by any of the regexes if !summaryMap[objType].invalidCount[sqlStmtInfo.objName] { reason := fmt.Sprintf("%s - '%s'", UNSUPPORTED_PG_SYNTAX, 
err.Error()) reportCase(fpath, reason, "https://github.com/yugabyte/yb-voyager/issues/1625", - "Fix the schema as per PG syntax", objType, sqlStmtInfo.objName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "Fix the schema as per PG syntax", objType, sqlStmtInfo.objName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } continue } - createTableNode, isCreateTable := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateStmt) - alterTableNode, isAlterTable := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_AlterTableStmt) - createIndexNode, isCreateIndex := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_IndexStmt) - createPolicyNode, isCreatePolicy := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreatePolicyStmt) - createCompositeTypeNode, isCreateCompositeType := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CompositeTypeStmt) - createEnumTypeNode, isCreateEnumType := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateEnumStmt) - createTriggerNode, isCreateTrigger := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateTrigStmt) - - if objType == TABLE && isCreateTable { - reportPartitionsRelatedIssues(createTableNode, sqlStmtInfo, fpath) - reportGeneratedStoredColumnTables(createTableNode, sqlStmtInfo, fpath) - reportExclusionConstraintCreateTable(createTableNode, sqlStmtInfo, fpath) - reportDeferrableConstraintCreateTable(createTableNode, sqlStmtInfo, fpath) - reportUnsupportedDatatypes(createTableNode.CreateStmt.Relation, createTableNode.CreateStmt.TableElts, sqlStmtInfo, fpath, objType) - parseColumnsWithUnsupportedIndexDatatypes(createTableNode) - reportUnloggedTable(createTableNode, sqlStmtInfo, fpath) - } - if isAlterTable { - reportAlterAddPKOnPartition(alterTableNode, sqlStmtInfo, fpath) - reportAlterTableVariants(alterTableNode, sqlStmtInfo, fpath, objType) - reportExclusionConstraintAlterTable(alterTableNode, sqlStmtInfo, fpath) - reportDeferrableConstraintAlterTable(alterTableNode, sqlStmtInfo, fpath) - } - if isCreateIndex { - 
reportIndexMethods(createIndexNode, sqlStmtInfo, fpath) - reportCreateIndexStorageParameter(createIndexNode, sqlStmtInfo, fpath) - reportUnsupportedIndexesOnComplexDatatypes(createIndexNode, sqlStmtInfo, fpath) - checkGinVariations(createIndexNode, sqlStmtInfo, fpath) - } - - if isCreatePolicy { - reportPolicyRequireRolesOrGrants(createPolicyNode, sqlStmtInfo, fpath) - } - - if isCreateTrigger { - reportUnsupportedTriggers(createTriggerNode, sqlStmtInfo, fpath) - } - - if isCreateCompositeType { - //Adding the composite types (UDTs) in the list - /* - e.g. CREATE TYPE non_public."Address_type" AS ( - street VARCHAR(100), - city VARCHAR(50), - state VARCHAR(50), - zip_code VARCHAR(10) - ); - stmt:{composite_type_stmt:{typevar:{schemaname:"non_public" relname:"Address_type" relpersistence:"p" location:14} coldeflist:{column_def:{colname:"street" - type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"varchar"}} typmods:{a_const:{ival:{ival:100} location:65}} typemod:-1 location:57} ... - - Here the type name is required which is available in typevar->relname typevar->schemaname for qualified name - */ - typeName := createCompositeTypeNode.CompositeTypeStmt.Typevar.GetRelname() - typeSchemaName := createCompositeTypeNode.CompositeTypeStmt.Typevar.GetSchemaname() - fullTypeName := lo.Ternary(typeSchemaName != "", typeSchemaName+"."+typeName, typeName) - compositeTypes = append(compositeTypes, fullTypeName) - } - if isCreateEnumType { - //Adding the composite types (UDTs) in the list - /* - e.g. 
CREATE TYPE decline_reason AS ENUM ( - 'duplicate_payment_method', - 'server_failure' - ); - stmt:{create_enum_stmt:{type_name:{string:{sval:"decline_reason"}} vals:{string:{sval:"duplicate_payment_method"}} vals:{string:{sval:"server_failure"}}}} - stmt_len:101} - - Here the type name is required which is available in typevar->relname typevar->schemaname for qualified name - */ - typeNames := createEnumTypeNode.CreateEnumStmt.GetTypeName() - typeName, typeSchemaName := getTypeNameAndSchema(typeNames) - fullTypeName := lo.Ternary(typeSchemaName != "", typeSchemaName+"."+typeName, typeName) - enumTypes = append(enumTypes, fullTypeName) - } - } -} - -func reportUnsupportedTriggers(createTriggerNode *pg_query.Node_CreateTrigStmt, sqlStmtInfo sqlInfo, fpath string) { - schemaName := createTriggerNode.CreateTrigStmt.Relation.Schemaname - tableName := createTriggerNode.CreateTrigStmt.Relation.Relname - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - trigName := createTriggerNode.CreateTrigStmt.Trigname - displayObjectName := fmt.Sprintf("%s ON %s", trigName, fullyQualifiedName) - - /* - e.g.CREATE CONSTRAINT TRIGGER some_trig - AFTER DELETE ON xyz_schema.abc - DEFERRABLE INITIALLY DEFERRED - FOR EACH ROW EXECUTE PROCEDURE xyz_schema.some_trig(); - create_trig_stmt:{isconstraint:true trigname:"some_trig" relation:{schemaname:"xyz_schema" relname:"abc" inh:true relpersistence:"p" - location:56} funcname:{string:{sval:"xyz_schema"}} funcname:{string:{sval:"some_trig"}} row:true events:8 deferrable:true initdeferred:true}} - stmt_len:160} - */ - if createTriggerNode.CreateTrigStmt.Isconstraint { - reportCase(fpath, CONSTRAINT_TRIGGER_ISSUE_REASON, - "https://github.com/YugaByte/yugabyte-db/issues/1709", "", "TRIGGER", displayObjectName, - sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, CONSTRAINT_TRIGGER_DOC_LINK) - } - - /* - e.g. 
CREATE TRIGGER projects_loose_fk_trigger - AFTER DELETE ON public.projects - REFERENCING OLD TABLE AS old_table - FOR EACH STATEMENT EXECUTE FUNCTION xyz_schema.some_trig(); - stmt:{create_trig_stmt:{trigname:"projects_loose_fk_trigger" relation:{schemaname:"public" relname:"projects" inh:true - relpersistence:"p" location:58} funcname:{string:{sval:"xyz_schema"}} funcname:{string:{sval:"some_trig"}} events:8 - transition_rels:{trigger_transition:{name:"old_table" is_table:true}}}} stmt_len:167} - */ - if createTriggerNode.CreateTrigStmt.GetTransitionRels() != nil { - summaryMap["TRIGGER"].invalidCount[displayObjectName] = true - reportCase(fpath, REFERENCING_CLAUSE_FOR_TRIGGERS, - "https://github.com/YugaByte/yugabyte-db/issues/1668", "", "TRIGGER", displayObjectName, sqlStmtInfo.formattedStmt, - UNSUPPORTED_FEATURES, REFERENCING_CLAUSE_TRIGGER_DOC_LINK) - } - - /* - e.g.CREATE TRIGGER after_insert_or_delete_trigger - BEFORE INSERT OR DELETE ON main_table - FOR EACH ROW - EXECUTE FUNCTION handle_insert_or_delete(); - stmt:{create_trig_stmt:{trigname:"after_insert_or_delete_trigger" relation:{relname:"main_table" inh:true relpersistence:"p" - location:111} funcname:{string:{sval:"handle_insert_or_delete"}} row:true timing:2 events:12}} stmt_len:177} - - here, - timing - bits of BEFORE/AFTER/INSTEAD - events - bits of "OR" INSERT/UPDATE/DELETE/TRUNCATE - row - FOR EACH ROW (true), FOR EACH STATEMENT (false) - refer - https://github.com/pganalyze/pg_query_go/blob/c3a818d346a927c18469460bb18acb397f4f4301/parser/include/postgres/catalog/pg_trigger_d.h#L49 - TRIGGER_TYPE_BEFORE (1 << 1) - TRIGGER_TYPE_INSERT (1 << 2) - TRIGGER_TYPE_DELETE (1 << 3) - TRIGGER_TYPE_UPDATE (1 << 4) - TRIGGER_TYPE_TRUNCATE (1 << 5) - TRIGGER_TYPE_INSTEAD (1 << 6) - */ - - timing := createTriggerNode.CreateTrigStmt.Timing - isSecondBitSet := timing&(1<<1) != 0 - if isSecondBitSet && createTriggerNode.CreateTrigStmt.Row { - // BEFORE clause will have the bits in timing as 1<<1 - // BEFORE and 
FOR EACH ROW on partitioned table is not supported in PG<=12 - if partitionTablesMap[fullyQualifiedName] { - summaryMap["TRIGGER"].invalidCount[displayObjectName] = true - reportCase(fpath, BEFORE_FOR_EACH_ROW_TRIGGERS_ON_PARTITIONED_TABLE, - "https://github.com/yugabyte/yugabyte-db/issues/24830", "Create the triggers on individual partitions.", "TRIGGER", displayObjectName, sqlStmtInfo.formattedStmt, - UNSUPPORTED_FEATURES, BEFORE_ROW_TRIGGER_PARTITIONED_TABLE_DOC_LINK) - } - } - -} - -func reportAlterAddPKOnPartition(alterTableNode *pg_query.Node_AlterTableStmt, sqlStmtInfo sqlInfo, fpath string) { - schemaName := alterTableNode.AlterTableStmt.Relation.Schemaname - tableName := alterTableNode.AlterTableStmt.Relation.Relname - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - - alterCmd := alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd() - /* - e.g. - ALTER TABLE example2 - ADD CONSTRAINT example2_pkey PRIMARY KEY (id); - tmts:{stmt:{alter_table_stmt:{relation:{relname:"example2" inh:true relpersistence:"p" location:693} - cmds:{alter_table_cmd:{subtype:AT_AddConstraint def:{constraint:{contype:CONSTR_PRIMARY conname:"example2_pkey" - location:710 keys:{string:{sval:"id"}}}} behavior:DROP_RESTRICT}} objtype:OBJECT_TABLE}} stmt_location:679 stmt_len:72} - - */ - - constraint := alterCmd.GetDef().GetConstraint() - - if constraint != nil && constraint.Contype == pg_query.ConstrType_CONSTR_PRIMARY { - if partitionTablesMap[fullyQualifiedName] { - reportCase(fpath, ADDING_PK_TO_PARTITIONED_TABLE_ISSUE_REASON, - "https://github.com/yugabyte/yugabyte-db/issues/10074", "", "TABLE", fullyQualifiedName, sqlStmtInfo.formattedStmt, MIGRATION_CAVEATS, ADDING_PK_TO_PARTITIONED_TABLE_DOC_LINK) - } else { - primaryConsInAlter[fullyQualifiedName] = &sqlStmtInfo - } - } -} - -/* -This functions reports multiple issues - -1. Adding PK to Partitioned Table (in cases where ALTER is before create) -2. 
Expression partitions are not allowed if PK/UNIQUE columns are there is table -3. List partition strategy is not allowed with multi-column partitions. -4. Partition columns should all be included in Primary key set if any on table. -*/ -func reportPartitionsRelatedIssues(createTableNode *pg_query.Node_CreateStmt, sqlStmtInfo sqlInfo, fpath string) { - schemaName := createTableNode.CreateStmt.Relation.Schemaname - tableName := createTableNode.CreateStmt.Relation.Relname - columns := createTableNode.CreateStmt.TableElts - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - - /* - e.g. In case if PRIMARY KEY is included in column definition - CREATE TABLE example2 ( - id numeric NOT NULL PRIMARY KEY, - country_code varchar(3), - record_type varchar(5) - ) PARTITION BY RANGE (country_code, record_type) ; - stmts:{stmt:{create_stmt:{relation:{relname:"example2" inh:true relpersistence:"p" location:193} table_elts:{column_def:{colname:"id" - type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"numeric"}} typemod:-1 location:208} is_local:true - constraints:{constraint:{contype:CONSTR_NOTNULL location:216}} constraints:{constraint:{contype:CONSTR_PRIMARY location:225}} - location:205}} ... partspec:{strategy:PARTITION_STRATEGY_RANGE - part_params:{partition_elem:{name:"country_code" location:310}} part_params:{partition_elem:{name:"record_type" location:324}} - location:290} oncommit:ONCOMMIT_NOOP}} stmt_location:178 stmt_len:159} - - In case if PRIMARY KEY in column list CREATE TABLE example1 (..., PRIMARY KEY(id,country_code) ) PARTITION BY RANGE (country_code, record_type); - stmts:{stmt:{create_stmt:{relation:{relname:"example1" inh:true relpersistence:"p" location:15} table_elts:{column_def:{colname:"id" - type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"numeric"}} ... 
table_elts:{constraint:{contype:CONSTR_PRIMARY - location:98 keys:{string:{sval:"id"}} keys:{string:{sval:"country_code"}}}} partspec:{strategy:PARTITION_STRATEGY_RANGE - part_params:{partition_elem:{name:"country_code" location:150}} part_params:{partition_elem:{name:"record_type" ... - */ - if createTableNode.CreateStmt.GetPartspec() == nil { - //If not partition table then no need to proceed - return - } - - if primaryConsInAlter[fullyQualifiedName] != nil { - //reporting the ALTER TABLE ADD PK on partition table here in case the order is different if ALTER is before the CREATE - alterTableSqlInfo := primaryConsInAlter[fullyQualifiedName] - reportCase(alterTableSqlInfo.fileName, ADDING_PK_TO_PARTITIONED_TABLE_ISSUE_REASON, - "https://github.com/yugabyte/yugabyte-db/issues/10074", "", "TABLE", fullyQualifiedName, alterTableSqlInfo.formattedStmt, MIGRATION_CAVEATS, ADDING_PK_TO_PARTITIONED_TABLE_DOC_LINK) - } - - partitionTablesMap[fullyQualifiedName] = true // marking the partition tables in the map - - var primaryKeyColumns, partitionColumns, uniqueKeyColumns []string - - for _, column := range columns { - if column.GetColumnDef() != nil { //In case PRIMARY KEY constraint is added with column definition - constraints := column.GetColumnDef().Constraints - for _, constraint := range constraints { - if constraint.GetConstraint().Contype == pg_query.ConstrType_CONSTR_PRIMARY { - primaryKeyColumns = []string{column.GetColumnDef().Colname} - } - if constraint.GetConstraint().Contype == pg_query.ConstrType_CONSTR_UNIQUE { - uniqueKeyColumns = append(uniqueKeyColumns, column.GetColumnDef().Colname) - } - } - } else if column.GetConstraint() != nil { - //In case CREATE DDL has PRIMARY KEY(column_name) - it will be included in columns but won't have columnDef as its a constraint - for _, key := range column.GetConstraint().GetKeys() { - if column.GetConstraint().Contype == pg_query.ConstrType_CONSTR_PRIMARY { - primaryKeyColumns = append(primaryKeyColumns, 
key.GetString_().Sval) - } else if column.GetConstraint().Contype == pg_query.ConstrType_CONSTR_UNIQUE { - uniqueKeyColumns = append(uniqueKeyColumns, key.GetString_().Sval) - } - } - } - } - - partitionElements := createTableNode.CreateStmt.GetPartspec().GetPartParams() - - for _, partElem := range partitionElements { - if partElem.GetPartitionElem().GetExpr() != nil { - //Expression partitions - if len(primaryKeyColumns) > 0 || len(uniqueKeyColumns) > 0 { - summaryMap["TABLE"].invalidCount[fullyQualifiedName] = true - reportCase(fpath, "Issue with Partition using Expression on a table which cannot contain Primary Key / Unique Key on any column", - "https://github.com/yugabyte/yb-voyager/issues/698", "Remove the Constriant from the table definition", "TABLE", fullyQualifiedName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, EXPRESSION_PARTIITON_DOC_LINK) - } - } else { - partitionColumns = append(partitionColumns, partElem.GetPartitionElem().GetName()) - } - } - - if len(partitionColumns) > 1 && createTableNode.CreateStmt.GetPartspec().GetStrategy() == pg_query.PartitionStrategy_PARTITION_STRATEGY_LIST { - summaryMap["TABLE"].invalidCount[fullyQualifiedName] = true - reportCase(fpath, `cannot use "list" partition strategy with more than one column`, - "https://github.com/yugabyte/yb-voyager/issues/699", "Make it a single column partition by list or choose other supported Partitioning methods", "TABLE", fullyQualifiedName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, LIST_PARTIION_MULTI_COLUMN_DOC_LINK) - } - - if len(primaryKeyColumns) == 0 { // no need to report in case of non-PK tables - return - } - - partitionColumnsNotInPK, _ := lo.Difference(partitionColumns, primaryKeyColumns) - if len(partitionColumnsNotInPK) > 0 { - summaryMap["TABLE"].invalidCount[fullyQualifiedName] = true - reportCase(fpath, fmt.Sprintf("%s - (%s)", INSUFFICIENT_COLUMNS_IN_PK_FOR_PARTITION, strings.Join(partitionColumnsNotInPK, ", ")), - 
"https://github.com/yugabyte/yb-voyager/issues/578", "Add all Partition columns to Primary Key", "TABLE", fullyQualifiedName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, PARTITION_KEY_NOT_PK_DOC_LINK) - } - -} - -// Reference for some of the types https://docs.yugabyte.com/stable/api/ysql/datatypes/ (datatypes with type 1) -var UnsupportedIndexDatatypes = []string{ - "citext", - "tsvector", - "tsquery", - "jsonb", - "inet", - "json", - "macaddr", - "macaddr8", - "cidr", - "bit", // for BIT (n) - "varbit", // for BIT varying (n) - //Below ones are not supported on PG as well with atleast btree access method. Better to have in our list though - //Need to understand if there is other method or way available in PG to have these index key [TODO] - "circle", - "box", - "line", - "lseg", - "point", - "pg_lsn", - "path", - "polygon", - "txid_snapshot", - // array as well but no need to add it in the list as fetching this type is a different way TODO: handle better with specific types -} - -func parseColumnsWithUnsupportedIndexDatatypes(createTableNode *pg_query.Node_CreateStmt) { - schemaName := createTableNode.CreateStmt.Relation.Schemaname - tableName := createTableNode.CreateStmt.Relation.Relname - columns := createTableNode.CreateStmt.TableElts - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - for _, column := range columns { - /* - e.g. 1. 
CREATE TABLE public.citext_type ( - id integer, - lists_of_data text[], - data public.citext - ); - stmt:{create_stmt:{relation:{schemaname:"public" relname:"citext_type" inh:true relpersistence:"p" location:258} table_elts:{column_def:{colname:"id" - type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"int4"}} typemod:-1 location:287} is_local:true location:284}} table_elts: - {column_def:{colname:"lists_of_data" type_name:{names:{string:{sval:"text"}} typemod:-1 array_bounds:{integer:{ival:-1}} location:315} is_local:true - location:301}} table_elts:{column_def:{colname:"data" type_name:{names:{string:{sval:"public"}} names:{string:{sval:"citext"}} typemod:-1 location:333} - is_local:true location:328}} oncommit:ONCOMMIT_NOOP}} stmt_location:244 stmt_len:108 - - 2. CREATE TABLE public.ts_query_table ( - id int generated by default as identity, - query tsquery - ); - stmt:{create_stmt:{relation:{schemaname:"public" relname:"ts_query_table" inh:true relpersistence:"p" location:211} table_elts:{column_def:{colname:"id" - type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"int4"}} typemod:-1 location:242} is_local:true constraints:{constraint:{contype:CONSTR_IDENTITY - location:246 generated_when:"d"}} location:239}} table_elts:{column_def:{colname:"query" type_name:{names:{string:{sval:"tsquery"}} - typemod:-1 location:290} is_local:true location:284}} oncommit:ONCOMMIT_NOOP}} stmt_location:196 stmt_len:110 - - 3. create table combined_tbl ( - id int, c cidr, ci circle, b box, j json, - l line, ls lseg, maddr macaddr, maddr8 macaddr8, p point, - lsn pg_lsn, p1 path, p2 polygon, id1 txid_snapshot, - bitt bit (13), bittv bit varying(15), address non_public."Address_type" - ); - stmt:{create_stmt:{relation:{relname:"combined_tbl" ... colname:"id" type_name:...names:{string:{sval:"int4"}}... column_def:{colname:"c" type_name:{names:{string:{sval:"cidr"}} - ... column_def:{colname:"ci" type_name:{names:{string:{sval:"circle"}} ... 
column_def:{colname:"b"type_name:{names:{string:{sval:"box"}} ... column_def:{colname:"j" type_name:{names:{string:{sval:"json"}} - ... column_def:{colname:"l" type_name:{names:{string:{sval:"line"}} ...column_def:{colname:"ls" type_name:{names:{string:{sval:"lseg"}} ...column_def:{colname:"maddr" type_name:{names:{string:{sval:"macaddr"}} - ...column_def:{colname:"maddr8" type_name:{names:{string:{sval:"macaddr8"}}...column_def:{colname:"p" type_name:{names:{string:{sval:"point"}} ...column_def:{colname:"lsn" type_name:{names:{string:{sval:"pg_lsn"}} - ...column_def:{colname:"p1" type_name:{names:{string:{sval:"path"}} .... column_def:{colname:"p2" type_name:{names:{string:{sval:"polygon"}} .... column_def:{colname:"id1" type_name:{names:{string:{sval:"txid_snapshot"}} - ... column_def:{colname:"bitt" type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"bit"}} typmods:{a_const:{ival:{ival:13} location:241}} typemod:-1 location:236} is_local:true location:231}} - table_elts:{column_def:{colname:"bittv" type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"varbit"}} typmods:{a_const:{ival:{ival:15} location:264}} typemod:-1 location:252} ... column_def:{colname:"address" - type_name:{names:{string:{sval:"non_public"}} names:{string:{sval:"Address_type"}} is_local:true location:246}} oncommit:ONCOMMIT_NOOP}} stmt_location:51 stmt_len:217 - - - */ - if column.GetColumnDef() != nil { - typeNames := column.GetColumnDef().GetTypeName().GetNames() - typeName, typeSchemaName := getTypeNameAndSchema(typeNames) - fullTypeName := lo.Ternary(typeSchemaName != "", typeSchemaName+"."+typeName, typeName) - colName := column.GetColumnDef().GetColname() - if len(column.GetColumnDef().GetTypeName().GetArrayBounds()) > 0 { - //For Array types and storing the type as "array" as of now we can enhance the to have specific type e.g. 
INT4ARRAY - _, ok := columnsWithUnsupportedIndexDatatypes[fullyQualifiedName] - if !ok { - columnsWithUnsupportedIndexDatatypes[fullyQualifiedName] = make(map[string]string) - } - columnsWithUnsupportedIndexDatatypes[fullyQualifiedName][colName] = "array" - } else if slices.Contains(UnsupportedIndexDatatypes, typeName) || slices.Contains(compositeTypes, fullTypeName) { - _, ok := columnsWithUnsupportedIndexDatatypes[fullyQualifiedName] - if !ok { - columnsWithUnsupportedIndexDatatypes[fullyQualifiedName] = make(map[string]string) - } - columnsWithUnsupportedIndexDatatypes[fullyQualifiedName][colName] = typeName - if slices.Contains(compositeTypes, fullTypeName) { //For UDTs - columnsWithUnsupportedIndexDatatypes[fullyQualifiedName][colName] = "user_defined_type" - } - } - } - } -} - -func reportUnsupportedIndexesOnComplexDatatypes(createIndexNode *pg_query.Node_IndexStmt, sqlStmtInfo sqlInfo, fpath string) { - indexName := createIndexNode.IndexStmt.GetIdxname() - relName := createIndexNode.IndexStmt.GetRelation() - schemaName := relName.GetSchemaname() - tableName := relName.GetRelname() - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - displayObjName := fmt.Sprintf("%s ON %s", indexName, fullyQualifiedName) - /* - e.g. - 1. CREATE INDEX tsvector_idx ON public.documents (title_tsvector, id); - stmt:{index_stmt:{idxname:"tsvector_idx" relation:{schemaname:"public" relname:"documents" inh:true relpersistence:"p" location:510} access_method:"btree" - index_params:{index_elem:{name:"title_tsvector" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} index_params:{index_elem:{name:"id" - ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}}}} stmt_location:479 stmt_len:69 - - 2. 
CREATE INDEX idx_json ON public.test_json ((data::jsonb)); - stmt:{index_stmt:{idxname:"idx_json" relation:{schemaname:"public" relname:"test_json" inh:true relpersistence:"p" location:703} access_method:"btree" - index_params:{index_elem:{expr:{type_cast:{arg:{column_ref:{fields:{string:{sval:"data"}} location:722}} type_name:{names:{string:{sval:"jsonb"}} typemod:-1 - location:728} location:726}} ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}}}} stmt_location:676 stmt_len:59 - */ - if createIndexNode.IndexStmt.AccessMethod != "btree" { - return // Right now not reporting any other access method issues with such types. - } - _, ok := columnsWithUnsupportedIndexDatatypes[fullyQualifiedName] - if !ok { - return - } - for _, param := range createIndexNode.IndexStmt.GetIndexParams() { - /* - cases to cover - 1. normal index on column with these types - 2. expression index with casting of unsupported column to supported types [No handling as such just to test as colName will not be there] - 3. expression index with casting to unsupported types - 4. normal index on column with UDTs - 5. these type of indexes on different access method like gin etc.. 
[TODO to explore more, for now not reporting the indexes on anyother access method than btree] - */ - colName := param.GetIndexElem().GetName() - typeName, ok := columnsWithUnsupportedIndexDatatypes[fullyQualifiedName][colName] - if ok { - summaryMap["INDEX"].invalidCount[displayObjName] = true - reportCase(fpath, fmt.Sprintf(ISSUE_INDEX_WITH_COMPLEX_DATATYPES, typeName), "https://github.com/yugabyte/yugabyte-db/issues/9698", - "Refer to the docs link for the workaround", "INDEX", displayObjName, sqlStmtInfo.formattedStmt, - UNSUPPORTED_FEATURES, INDEX_ON_UNSUPPORTED_TYPE) - return - } - //For the expression index case to report in case casting to unsupported types #3 - typeNames := param.GetIndexElem().GetExpr().GetTypeCast().GetTypeName().GetNames() - castTypeName, castTypeSchemaName := getTypeNameAndSchema(typeNames) - fullCastTypeName := lo.Ternary(castTypeSchemaName != "", castTypeSchemaName+"."+castTypeName, castTypeName) - if len(param.GetIndexElem().GetExpr().GetTypeCast().GetTypeName().GetArrayBounds()) > 0 { - //In case casting is happening for an array type - summaryMap["INDEX"].invalidCount[displayObjName] = true - reportCase(fpath, fmt.Sprintf(ISSUE_INDEX_WITH_COMPLEX_DATATYPES, "array"), "https://github.com/yugabyte/yugabyte-db/issues/9698", - "Refer to the docs link for the workaround", "INDEX", displayObjName, sqlStmtInfo.formattedStmt, - UNSUPPORTED_FEATURES, INDEX_ON_UNSUPPORTED_TYPE) - return - } else if slices.Contains(UnsupportedIndexDatatypes, castTypeName) || slices.Contains(compositeTypes, fullCastTypeName) { - summaryMap["INDEX"].invalidCount[displayObjName] = true - reason := fmt.Sprintf(ISSUE_INDEX_WITH_COMPLEX_DATATYPES, castTypeName) - if slices.Contains(compositeTypes, fullCastTypeName) { - reason = fmt.Sprintf(ISSUE_INDEX_WITH_COMPLEX_DATATYPES, "user_defined_type") - } - reportCase(fpath, reason, "https://github.com/yugabyte/yugabyte-db/issues/9698", - "Refer to the docs link for the workaround", "INDEX", displayObjName, 
sqlStmtInfo.formattedStmt, - UNSUPPORTED_FEATURES, INDEX_ON_UNSUPPORTED_TYPE) - return - } - } -} - -var unsupportedIndexMethods = []string{ - "gist", - "brin", - "spgist", -} - -func reportIndexMethods(createIndexNode *pg_query.Node_IndexStmt, sqlStmtInfo sqlInfo, fpath string) { - indexMethod := createIndexNode.IndexStmt.AccessMethod - - if !slices.Contains(unsupportedIndexMethods, indexMethod) { - return - } - - indexName := createIndexNode.IndexStmt.GetIdxname() - relName := createIndexNode.IndexStmt.GetRelation() - schemaName := relName.GetSchemaname() - tableName := relName.GetRelname() - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - displayObjName := fmt.Sprintf("%s ON %s", indexName, fullyQualifiedName) - - summaryMap["INDEX"].invalidCount[displayObjName] = true - - reportCase(fpath, fmt.Sprintf(INDEX_METHOD_ISSUE_REASON, strings.ToUpper(indexMethod)), - "https://github.com/YugaByte/yugabyte-db/issues/1337", "", "INDEX", displayObjName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, UNSUPPORTED_INDEX_METHODS_DOC_LINK) -} - -func reportUnloggedTable(createTableNode *pg_query.Node_CreateStmt, sqlStmtInfo sqlInfo, fpath string) { - schemaName := createTableNode.CreateStmt.Relation.Schemaname - tableName := createTableNode.CreateStmt.Relation.Relname - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - /* - e.g CREATE UNLOGGED TABLE tbl_unlogged (id int, val text); - stmt:{create_stmt:{relation:{schemaname:"public" relname:"tbl_unlogged" inh:true relpersistence:"u" location:19} - table_elts:{column_def:{colname:"id" type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"int4"}} - typemod:-1 location:54} is_local:true location:51}} table_elts:{column_def:{colname:"val" type_name:{names:{string:{sval:"text"}} - typemod:-1 location:93} is_local:true location:89}} oncommit:ONCOMMIT_NOOP}} stmt_len:99 - here, relpersistence is the information about the persistence of 
this table where u-> unlogged, p->persistent, t->temporary tables - */ - if createTableNode.CreateStmt.Relation.GetRelpersistence() == "u" { - reportCase(fpath, ISSUE_UNLOGGED_TABLE, "https://github.com/yugabyte/yugabyte-db/issues/1129/", - "Remove UNLOGGED keyword to make it work", "TABLE", fullyQualifiedName, sqlStmtInfo.formattedStmt, - UNSUPPORTED_FEATURES, UNLOGGED_TABLE_DOC_LINK) - } -} - -// Checks Whether there is a GIN index -/* -Following type of SQL queries are being taken care of by this function - - 1. CREATE INDEX index_name ON table_name USING gin(column1, column2 ...) - 2. CREATE INDEX index_name ON table_name USING gin(column1 [ASC/DESC/HASH]) -*/ -func checkGinVariations(createIndexNode *pg_query.Node_IndexStmt, sqlStmtInfo sqlInfo, fpath string) { - indexName := createIndexNode.IndexStmt.GetIdxname() - relName := createIndexNode.IndexStmt.GetRelation() - schemaName := relName.GetSchemaname() - tableName := relName.GetRelname() - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - displayObjectName := fmt.Sprintf("%s ON %s", indexName, fullyQualifiedName) - if createIndexNode.IndexStmt.GetAccessMethod() != "gin" { // its always in lower - return - } else { - summaryMap["INDEX"].details[GIN_INDEX_DETAILS] = true - } - /* - e.g. 
CREATE INDEX idx_name ON public.test USING gin (data, data2); - stmt:{index_stmt:{idxname:"idx_name" relation:{schemaname:"public" relname:"test" inh:true relpersistence:"p" - location:125} access_method:"gin" index_params:{index_elem:{name:"data" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} - index_params:{index_elem:{name:"data2" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}}}} stmt_location:81 stmt_len:81 - */ - if len(createIndexNode.IndexStmt.GetIndexParams()) > 1 { - summaryMap["INDEX"].invalidCount[displayObjectName] = true - reportCase(fpath, "Schema contains gin index on multi column which is not supported.", - "https://github.com/yugabyte/yugabyte-db/issues/10652", "", "INDEX", displayObjectName, - sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, GIN_INDEX_MULTI_COLUMN_DOC_LINK) - return - } - /* - e.g. CREATE INDEX idx_name ON public.test USING gin (data DESC); - stmt:{index_stmt:{idxname:"idx_name" relation:{schemaname:"public" relname:"test" inh:true relpersistence:"p" location:44} - access_method:"gin" index_params:{index_elem:{name:"data" ordering:SORTBY_DESC nulls_ordering:SORTBY_NULLS_DEFAULT}}}} stmt_len:80 - */ - idxParam := createIndexNode.IndexStmt.GetIndexParams()[0] // taking only the first as already checking len > 1 above so should be fine - if idxParam.GetIndexElem().GetOrdering() != pg_query.SortByDir_SORTBY_DEFAULT { - summaryMap["INDEX"].invalidCount[displayObjectName] = true - reportCase(fpath, "Schema contains gin index on column with ASC/DESC/HASH Clause which is not supported.", - "https://github.com/yugabyte/yugabyte-db/issues/10653", "", "INDEX", displayObjectName, - sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, GIN_INDEX_DIFFERENT_ISSUE_DOC_LINK) - } - -} - -func reportPolicyRequireRolesOrGrants(createPolicyNode *pg_query.Node_CreatePolicyStmt, sqlStmtInfo sqlInfo, fpath string) { - policyName := createPolicyNode.CreatePolicyStmt.GetPolicyName() - roles := 
createPolicyNode.CreatePolicyStmt.GetRoles() - relname := createPolicyNode.CreatePolicyStmt.GetTable() - schemaName := relname.Schemaname - tableName := relname.Relname - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - roleNames := make([]string, 0) - /* - e.g. CREATE POLICY P ON tbl1 TO regress_rls_eve, regress_rls_frank USING (true); - stmt:{create_policy_stmt:{policy_name:"p" table:{relname:"tbl1" inh:true relpersistence:"p" location:20} cmd_name:"all" - permissive:true roles:{role_spec:{roletype:ROLESPEC_CSTRING rolename:"regress_rls_eve" location:28}} roles:{role_spec: - {roletype:ROLESPEC_CSTRING rolename:"regress_rls_frank" location:45}} qual:{a_const:{boolval:{boolval:true} location:70}}}} - stmt_len:75 - - here role_spec of each roles is managing the roles related information in a POLICY DDL if any, so we can just check if there is - a role name available in it which means there is a role associated with this DDL. Hence report it. - - */ - for _, role := range roles { - roleName := role.GetRoleSpec().GetRolename() // only in case there is role associated with a policy it will error out in schema migration - if roleName != "" { - //this means there is some role or grants used in this Policy, so detecting it - roleNames = append(roleNames, roleName) + err = parserIssueDetector.ParseRequiredDDLs(sqlStmtInfo.formattedStmt) + if err != nil { + utils.ErrExit("error parsing stmt: [%s]: %v", sqlStmtInfo.formattedStmt, err) } - } - if len(roleNames) > 0 { - policyNameWithTable := fmt.Sprintf("%s ON %s", policyName, fullyQualifiedName) - summaryMap["POLICY"].invalidCount[policyNameWithTable] = true - reportCase(fpath, fmt.Sprintf("%s Users - (%s)", POLICY_ROLE_ISSUE, strings.Join(roleNames, ",")), "https://github.com/yugabyte/yb-voyager/issues/1655", - "Users/Grants are not migrated during the schema migration. 
Create the Users manually to make the policies work", - "POLICY", policyNameWithTable, sqlStmtInfo.formattedStmt, MIGRATION_CAVEATS, POLICY_DOC_LINK) - } -} - -func reportUnsupportedDatatypes(relation *pg_query.RangeVar, columns []*pg_query.Node, sqlStmtInfo sqlInfo, fpath string, objectType string) { - schemaName := relation.Schemaname - tableName := relation.Relname - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - for _, column := range columns { - /* - e.g. CREATE TABLE test_xml_type(id int, data xml); - relation:{relname:"test_xml_type" inh:true relpersistence:"p" location:15} table_elts:{column_def:{colname:"id" - type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"int4"}} typemod:-1 location:32} - is_local:true location:29}} table_elts:{column_def:{colname:"data" type_name:{names:{string:{sval:"xml"}} - typemod:-1 location:42} is_local:true location:37}} oncommit:ONCOMMIT_NOOP}} - - here checking the type of each column as type definition can be a list names for types which are native e.g. int - it has type names - [pg_catalog, int4] both to determine but for complex types like text,json or xml etc. if doesn't have - info about pg_catalog. 
so checking the 0th only in case XML/XID to determine the type and report - */ - if column.GetColumnDef() != nil { - typeNames := column.GetColumnDef().GetTypeName().GetNames() - typeName, typeSchemaName := getTypeNameAndSchema(typeNames) - fullTypeName := lo.Ternary(typeSchemaName != "", typeSchemaName+"."+typeName, typeName) - isArrayType := len(column.GetColumnDef().GetTypeName().GetArrayBounds()) > 0 - colName := column.GetColumnDef().GetColname() - - liveUnsupportedDatatypes := srcdb.GetPGLiveMigrationUnsupportedDatatypes() - liveWithFfOrFbUnsupportedDatatypes := srcdb.GetPGLiveMigrationWithFFOrFBUnsupportedDatatypes() - - isUnsupportedDatatype := utils.ContainsAnyStringFromSlice(srcdb.PostgresUnsupportedDataTypes, typeName) - isUnsupportedDatatypeInLive := utils.ContainsAnyStringFromSlice(liveUnsupportedDatatypes, typeName) - - isUnsupportedDatatypeInLiveWithFFOrFBList := utils.ContainsAnyStringFromSlice(liveWithFfOrFbUnsupportedDatatypes, typeName) - isUDTDatatype := utils.ContainsAnyStringFromSlice(compositeTypes, fullTypeName) //if type is array - isEnumDatatype := utils.ContainsAnyStringFromSlice(enumTypes, fullTypeName) //is ENUM type - isArrayOfEnumsDatatype := isArrayType && isEnumDatatype - isUnsupportedDatatypeInLiveWithFFOrFB := isUnsupportedDatatypeInLiveWithFFOrFBList || isUDTDatatype || isArrayOfEnumsDatatype - - if isUnsupportedDatatype { - reason := fmt.Sprintf("%s - %s on column - %s", UNSUPPORTED_DATATYPE, typeName, colName) - summaryMap[objectType].invalidCount[sqlStmtInfo.objName] = true - var ghIssue, suggestion, docLink string - - switch typeName { - case "xml": - ghIssue = "https://github.com/yugabyte/yugabyte-db/issues/1043" - suggestion = "Data ingestion is not supported for this type in YugabyteDB so handle this type in different way. Refer link for more details." - docLink = XML_DATATYPE_DOC_LINK - case "xid": - ghIssue = "https://github.com/yugabyte/yugabyte-db/issues/15638" - suggestion = "Functions for this type e.g. 
txid_current are not supported in YugabyteDB yet" - docLink = XID_DATATYPE_DOC_LINK - case "geometry", "geography", "box2d", "box3d", "topogeometry": - ghIssue = "https://github.com/yugabyte/yugabyte-db/issues/11323" - suggestion = "" - docLink = UNSUPPORTED_DATATYPES_DOC_LINK - default: - ghIssue = "https://github.com/yugabyte/yb-voyager/issues/1731" - suggestion = "" - docLink = UNSUPPORTED_DATATYPES_DOC_LINK - } - reportCase(fpath, reason, ghIssue, suggestion, - objectType, fullyQualifiedName, sqlStmtInfo.formattedStmt, UNSUPPORTED_DATATYPES, docLink) - } else if objectType == TABLE && isUnsupportedDatatypeInLive { - //reporting only for TABLE Type as we don't deal with FOREIGN TABLE in live migration - reason := fmt.Sprintf("%s - %s on column - %s", UNSUPPORTED_DATATYPE_LIVE_MIGRATION, typeName, colName) - summaryMap[objectType].invalidCount[sqlStmtInfo.objName] = true - reportCase(fpath, reason, "https://github.com/yugabyte/yb-voyager/issues/1731", "", - objectType, fullyQualifiedName, sqlStmtInfo.formattedStmt, MIGRATION_CAVEATS, UNSUPPORTED_DATATYPE_LIVE_MIGRATION_DOC_LINK) - } else if objectType == TABLE && isUnsupportedDatatypeInLiveWithFFOrFB { - //reporting only for TABLE Type as we don't deal with FOREIGN TABLE in live migration - reportTypeName := fullTypeName - if isArrayType { // For Array cases to make it clear in issue - reportTypeName = fmt.Sprintf("%s[]", reportTypeName) - } - //reporting types in the list YugabyteUnsupportedDataTypesForDbzm, UDT columns as unsupported with live migration with ff/fb - reason := fmt.Sprintf("%s - %s on column - %s", UNSUPPORTED_DATATYPE_LIVE_MIGRATION_WITH_FF_FB, reportTypeName, colName) - summaryMap[objectType].invalidCount[sqlStmtInfo.objName] = true - reportCase(fpath, reason, "https://github.com/yugabyte/yb-voyager/issues/1731", "", - objectType, fullyQualifiedName, sqlStmtInfo.formattedStmt, MIGRATION_CAVEATS, UNSUPPORTED_DATATYPE_LIVE_MIGRATION_DOC_LINK) - } + if parserIssueDetector.IsGinIndexPresentInSchema 
{ + summaryMap["INDEX"].details[GIN_INDEX_DETAILS] = true } - } -} -func getTypeNameAndSchema(typeNames []*pg_query.Node) (string, string) { - typeName := "" - typeSchemaName := "" - if len(typeNames) > 0 { - typeName = typeNames[len(typeNames)-1].GetString_().Sval // type name can be qualified / unqualifed or native / non-native proper type name will always be available at last index - } - if len(typeNames) >= 2 { // Names list will have all the parts of qualified type name - typeSchemaName = typeNames[len(typeNames)-2].GetString_().Sval // // type name can be qualified / unqualifed or native / non-native proper schema name will always be available at last 2nd index - } - - return typeName, typeSchemaName -} - -var deferrableConstraintsList = []pg_query.ConstrType{ - pg_query.ConstrType_CONSTR_ATTR_DEFERRABLE, - pg_query.ConstrType_CONSTR_ATTR_DEFERRED, - pg_query.ConstrType_CONSTR_ATTR_IMMEDIATE, -} - -func reportDeferrableConstraintCreateTable(createTableNode *pg_query.Node_CreateStmt, sqlStmtInfo sqlInfo, fpath string) { - schemaName := createTableNode.CreateStmt.Relation.Schemaname - tableName := createTableNode.CreateStmt.Relation.Relname - columns := createTableNode.CreateStmt.TableElts - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - - for _, column := range columns { - /* - e.g. create table unique_def_test(id int UNIQUE DEFERRABLE, c1 int); - create_stmt:{relation:{relname:"unique_def_test" inh:true relpersistence:"p" location:15} - table_elts:{column_def:{colname:"id" type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"int4"}} - typemod:-1 location:34} is_local:true constraints:{constraint:{contype:CONSTR_UNIQUE location:38}} - constraints:{constraint:{contype:CONSTR_ATTR_DEFERRABLE location:45}} location:31}} .... 
- - here checking the case where this clause is in column definition so iterating over each column_def and in that - constraint type has deferrable or not and also it should not be a foreign constraint as Deferrable on FKs are - supported. - */ - if column.GetColumnDef() != nil { - constraints := column.GetColumnDef().GetConstraints() - colName := column.GetColumnDef().GetColname() - if constraints != nil { - isDeferrable := false - var deferrableConstraintType pg_query.ConstrType - for idx, constraint := range constraints { - if slices.Contains(deferrableConstraintsList, constraint.GetConstraint().Contype) { - //Getting the constraint type before the DEFERRABLE clause as the clause is applicable to that constraint - if idx > 0 { - deferrableConstraintType = constraints[idx-1].GetConstraint().Contype - } - isDeferrable = true - } - } - if isDeferrable && deferrableConstraintType != pg_query.ConstrType_CONSTR_FOREIGN { - summaryMap["TABLE"].invalidCount[sqlStmtInfo.objName] = true - generatedConName := generateConstraintName(deferrableConstraintType, tableName, []string{colName}) - specifiedConstraintName := column.GetConstraint().GetConname() - conName := lo.Ternary(specifiedConstraintName == "", generatedConName, specifiedConstraintName) - reportCase(fpath, DEFERRABLE_CONSTRAINT_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/1709", - "Remove these constraints from the exported schema and make the necessary changes to the application before pointing it to target", - "TABLE", fmt.Sprintf("%s, constraint: (%s)", fullyQualifiedName, conName), sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, DEFERRABLE_CONSTRAINT_DOC_LINK) - } - } - } else if column.GetConstraint() != nil { - /* - e.g. create table uniquen_def_test1(id int, c1 int, UNIQUE(id) DEFERRABLE INITIALLY DEFERRED); - {create_stmt:{relation:{relname:"unique_def_test1" inh:true relpersistence:"p" location:80} table_elts:{column_def:{colname:"id" - type_name:{.... 
names:{string:{sval:"int4"}} typemod:-1 location:108} is_local:true location:105}} - table_elts:{constraint:{contype:CONSTR_UNIQUE deferrable:true initdeferred:true location:113 keys:{string:{sval:"id"}}}} .. - - here checking the case where this constraint is at the at the end as a constraint only, so checking deferrable field in constraint - in case of its not a FK. - */ - colNames := getColumnNames(column.GetConstraint().GetKeys()) - if column.GetConstraint().Deferrable && column.GetConstraint().Contype != pg_query.ConstrType_CONSTR_FOREIGN { - generatedConName := generateConstraintName(column.GetConstraint().Contype, tableName, colNames) - specifiedConstraintName := column.GetConstraint().GetConname() - conName := lo.Ternary(specifiedConstraintName == "", generatedConName, specifiedConstraintName) - summaryMap["TABLE"].invalidCount[sqlStmtInfo.objName] = true - reportCase(fpath, DEFERRABLE_CONSTRAINT_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/1709", - "Remove these constraints from the exported schema and make the neccessary changes to the application to work on target seamlessly", - "TABLE", fmt.Sprintf("%s, constraint: (%s)", fullyQualifiedName, conName), sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, DEFERRABLE_CONSTRAINT_DOC_LINK) - } + ddlIssues, err := parserIssueDetector.GetDDLIssues(sqlStmtInfo.formattedStmt, targetDbVersion) + if err != nil { + utils.ErrExit("error getting ddl issues for stmt: [%s]: %v", sqlStmtInfo.formattedStmt, err) } - } -} - -func generateConstraintName(conType pg_query.ConstrType, tableName string, columns []string) string { - suffix := "" - //Deferrable is only applicable to following constraint - //https://www.postgresql.org/docs/current/sql-createtable.html#:~:text=Currently%2C%20only%20UNIQUE%2C%20PRIMARY%20KEY%2C%20EXCLUDE%2C%20and%20REFERENCES - switch conType { - case pg_query.ConstrType_CONSTR_UNIQUE: - suffix = "_key" - case pg_query.ConstrType_CONSTR_PRIMARY: - suffix = "_pkey" - case 
pg_query.ConstrType_CONSTR_EXCLUSION: - suffix = "_excl" - case pg_query.ConstrType_CONSTR_FOREIGN: - suffix = "_fkey" - } - - return fmt.Sprintf("%s_%s%s", tableName, strings.Join(columns, "_"), suffix) -} - -func getColumnNames(keys []*pg_query.Node) []string { - var res []string - for _, k := range keys { - res = append(res, k.GetString_().Sval) - } - return res -} - -func reportDeferrableConstraintAlterTable(alterTableNode *pg_query.Node_AlterTableStmt, sqlStmtInfo sqlInfo, fpath string) { - schemaName := alterTableNode.AlterTableStmt.Relation.Schemaname - tableName := alterTableNode.AlterTableStmt.Relation.Relname - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - - alterCmd := alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd() - /* - e.g. ALTER TABLE ONLY public.users ADD CONSTRAINT users_email_key UNIQUE (email) DEFERRABLE; - alter_table_cmd:{subtype:AT_AddConstraint def:{constraint:{contype:CONSTR_UNIQUE conname:"users_email_key" - deferrable:true location:196 keys:{string:{sval:"email"}}}} behavior:DROP_RESTRICT}} objtype:OBJECT_TABLE}} - - similar to CREATE table 2nd case where constraint is at the end of column definitions mentioning the constraint only - so here as well while adding constraint checking the type of constraint and the deferrable field of it. 
- */ - constraint := alterCmd.GetDef().GetConstraint() - if constraint != nil && constraint.Deferrable && constraint.Contype != pg_query.ConstrType_CONSTR_FOREIGN { - conName := constraint.Conname - summaryMap["TABLE"].invalidCount[fullyQualifiedName] = true - reportCase(fpath, DEFERRABLE_CONSTRAINT_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/1709", - "Remove these constraints from the exported schema and make the neccessary changes to the application to work on target seamlessly", - "TABLE", fmt.Sprintf("%s, constraint: (%s)", fullyQualifiedName, conName), sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, DEFERRABLE_CONSTRAINT_DOC_LINK) - } -} - -func reportExclusionConstraintCreateTable(createTableNode *pg_query.Node_CreateStmt, sqlStmtInfo sqlInfo, fpath string) { - - schemaName := createTableNode.CreateStmt.Relation.Schemaname - tableName := createTableNode.CreateStmt.Relation.Relname - columns := createTableNode.CreateStmt.TableElts - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - /* - e.g. 
CREATE TABLE "Test"( - id int, - room_id int, - time_range tsrange, - room_id1 int, - time_range1 tsrange - EXCLUDE USING gist (room_id WITH =, time_range WITH &&), - EXCLUDE USING gist (room_id1 WITH =, time_range1 WITH &&) - ); - create_stmt:{relation:{relname:"Test" inh:true relpersistence:"p" location:14} table_elts:...table_elts:{constraint:{contype:CONSTR_EXCLUSION - location:226 exclusions:{list:{items:{index_elem:{name:"room_id" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} - items:{list:{items:{string:{sval:"="}}}}}} exclusions:{list:{items:{index_elem:{name:"time_range" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} - items:{list:{items:{string:{sval:"&&"}}}}}} access_method:"gist"}} table_elts:{constraint:{contype:CONSTR_EXCLUSION location:282 exclusions:{list: - {items:{index_elem:{name:"room_id1" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} items:{list:{items:{string:{sval:"="}}}}}} - exclusions:{list:{items:{index_elem:{name:"time_range1" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} items:{list:{items:{string:{sval:"&&"}}}}}} - access_method:"gist"}} oncommit:ONCOMMIT_NOOP}} stmt_len:365} - here we are iterating over all the table_elts - table elements and which are comma separated column info in - the DDL so each column has column_def(column definition) in the parse tree but in case it is a constraint, the column_def - is nil. 
- - */ - for _, column := range columns { - //In case CREATE DDL has EXCLUDE USING gist(room_id '=', time_range WITH &&) - it will be included in columns but won't have columnDef as its a constraint - if column.GetColumnDef() == nil && column.GetConstraint() != nil { - if column.GetConstraint().Contype == pg_query.ConstrType_CONSTR_EXCLUSION { - colNames := getColumnNamesFromExclusions(column.GetConstraint().GetExclusions()) - generatedConName := generateConstraintName(column.GetConstraint().Contype, tableName, colNames) - specifiedConstraintName := column.GetConstraint().GetConname() - conName := lo.Ternary(specifiedConstraintName == "", generatedConName, specifiedConstraintName) - summaryMap["TABLE"].invalidCount[sqlStmtInfo.objName] = true - reportCase(fpath, EXCLUSION_CONSTRAINT_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/3944", - "Refer docs link for details on possible workaround", "TABLE", fmt.Sprintf("%s, constraint: (%s)", fullyQualifiedName, conName), sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, EXCLUSION_CONSTRAINT_DOC_LINK) - } + for _, i := range ddlIssues { + schemaAnalysisReport.Issues = append(schemaAnalysisReport.Issues, convertIssueInstanceToAnalyzeIssue(i, fpath, false)) } } } -func getColumnNamesFromExclusions(keys []*pg_query.Node) []string { - var res []string - for _, k := range keys { - //exclusions:{list:{items:{index_elem:{name:"room_id" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} - //items:{list:{items:{string:{sval:"="}}}}}} - res = append(res, k.GetList().GetItems()[0].GetIndexElem().Name) // every first element of items in exclusions will be col name - } - return res -} - -func reportCreateIndexStorageParameter(createIndexNode *pg_query.Node_IndexStmt, sqlStmtInfo sqlInfo, fpath string) { - indexName := createIndexNode.IndexStmt.GetIdxname() - relName := createIndexNode.IndexStmt.GetRelation() - schemaName := relName.GetSchemaname() - tableName := relName.GetRelname() - fullyQualifiedName := 
lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - /* - e.g. CREATE INDEX idx on table_name(id) with (fillfactor='70'); - index_stmt:{idxname:"idx" relation:{relname:"table_name" inh:true relpersistence:"p" location:21} access_method:"btree" - index_params:{index_elem:{name:"id" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} - options:{def_elem:{defname:"fillfactor" arg:{string:{sval:"70"}} ... - here again similar to ALTER table Storage parameters options is the high level field in for WITH options. - */ - if len(createIndexNode.IndexStmt.GetOptions()) > 0 { - //YB doesn't support any storage parameters from PG yet refer - - //https://docs.yugabyte.com/preview/api/ysql/the-sql-language/statements/ddl_create_table/#storage-parameters-1 - summaryMap["INDEX"].invalidCount[fmt.Sprintf("%s ON %s", indexName, fullyQualifiedName)] = true - reportCase(fpath, STORAGE_PARAMETERS_DDL_STMT_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/23467", - "Remove the storage parameters from the DDL", "INDEX", indexName, sqlStmtInfo.stmt, UNSUPPORTED_FEATURES, STORAGE_PARAMETERS_DDL_STMT_DOC_LINK) - } -} - -func reportAlterTableVariants(alterTableNode *pg_query.Node_AlterTableStmt, sqlStmtInfo sqlInfo, fpath string, objType string) { - schemaName := alterTableNode.AlterTableStmt.Relation.Schemaname - tableName := alterTableNode.AlterTableStmt.Relation.Relname - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - // this will the list of items in the SET (attribute=value, ..) - /* - e.g. alter table test_1 alter column col1 set (attribute_option=value); - cmds:{alter_table_cmd:{subtype:AT_SetOptions name:"col1" def:{list:{items:{def_elem:{defname:"attribute_option" - arg:{type_name:{names:{string:{sval:"value"}} typemod:-1 location:263}} defaction:DEFELEM_UNSPEC location:246}}}}... 
- for set attribute issue we will the type of alter setting the options and in the 'def' definition field which has the - information of the type, we will check if there is any list which will only present in case there is syntax like (...) - */ - if alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd().GetSubtype() == pg_query.AlterTableType_AT_SetOptions && - len(alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd().GetDef().GetList().GetItems()) > 0 { - reportCase(fpath, ALTER_TABLE_SET_ATTRIBUTE_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/1124", - "Remove it from the exported schema", "TABLE", fullyQualifiedName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, UNSUPPORTED_ALTER_VARIANTS_DOC_LINK) - } - - /* - e.g. alter table test add constraint uk unique(id) with (fillfactor='70'); - alter_table_cmd:{subtype:AT_AddConstraint def:{constraint:{contype:CONSTR_UNIQUE conname:"asd" location:292 - keys:{string:{sval:"id"}} options:{def_elem:{defname:"fillfactor" arg:{string:{sval:"70"}}... - Similarly here we are trying to get the constraint if any and then get the options field which is WITH options - in this case only so checking that for this case. - */ - - if alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd().GetSubtype() == pg_query.AlterTableType_AT_AddConstraint && - len(alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd().GetDef().GetConstraint().GetOptions()) > 0 { - reportCase(fpath, STORAGE_PARAMETERS_DDL_STMT_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/23467", - "Remove the storage parameters from the DDL", "TABLE", fullyQualifiedName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, STORAGE_PARAMETERS_DDL_STMT_DOC_LINK) - } - - /* - e.g. 
ALTER TABLE example DISABLE example_rule; - cmds:{alter_table_cmd:{subtype:AT_DisableRule name:"example_rule" behavior:DROP_RESTRICT}} objtype:OBJECT_TABLE}} - checking the subType is sufficient in this case - */ - if alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd().GetSubtype() == pg_query.AlterTableType_AT_DisableRule { - ruleName := alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd().GetName() - reportCase(fpath, ALTER_TABLE_DISABLE_RULE_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/1124", - fmt.Sprintf("Remove this and the rule '%s' from the exported schema to be not enabled on the table.", ruleName), "TABLE", fullyQualifiedName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, UNSUPPORTED_ALTER_VARIANTS_DOC_LINK) - } - /* - e.g. ALTER TABLE example CLUSTER ON idx; - stmt:{alter_table_stmt:{relation:{relname:"example" inh:true relpersistence:"p" location:13} - cmds:{alter_table_cmd:{subtype:AT_ClusterOn name:"idx" behavior:DROP_RESTRICT}} objtype:OBJECT_TABLE}} stmt_len:32 - - */ - if alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd().GetSubtype() == pg_query.AlterTableType_AT_ClusterOn { - reportCase(fpath, ALTER_TABLE_CLUSTER_ON_ISSUE, - "https://github.com/YugaByte/yugabyte-db/issues/1124", "Remove it from the exported schema.", "TABLE", fullyQualifiedName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, UNSUPPORTED_ALTER_VARIANTS_DOC_LINK) - } - -} - -func reportExclusionConstraintAlterTable(alterTableNode *pg_query.Node_AlterTableStmt, sqlStmtInfo sqlInfo, fpath string) { - - schemaName := alterTableNode.AlterTableStmt.Relation.Schemaname - tableName := alterTableNode.AlterTableStmt.Relation.Relname - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - alterCmd := alterTableNode.AlterTableStmt.Cmds[0].GetAlterTableCmd() - /* - e.g. 
ALTER TABLE ONLY public.meeting ADD CONSTRAINT no_time_overlap EXCLUDE USING gist (room_id WITH =, time_range WITH &&); - cmds:{alter_table_cmd:{subtype:AT_AddConstraint def:{constraint:{contype:CONSTR_EXCLUSION conname:"no_time_overlap" location:41 - here again same checking the definition of the alter stmt if it has constraint and checking its type - */ - constraint := alterCmd.GetDef().GetConstraint() - if alterCmd.Subtype == pg_query.AlterTableType_AT_AddConstraint && constraint.Contype == pg_query.ConstrType_CONSTR_EXCLUSION { - // colNames := getColumnNamesFromExclusions(alterCmd.GetDef().GetConstraint().GetExclusions()) - conName := constraint.Conname - summaryMap["TABLE"].invalidCount[fullyQualifiedName] = true - reportCase(fpath, EXCLUSION_CONSTRAINT_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/3944", - "Refer docs link for details on possible workaround", "TABLE", fmt.Sprintf("%s, constraint: (%s)", fullyQualifiedName, conName), sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, EXCLUSION_CONSTRAINT_DOC_LINK) - } -} - -func reportGeneratedStoredColumnTables(createTableNode *pg_query.Node_CreateStmt, sqlStmtInfo sqlInfo, fpath string) { - schemaName := createTableNode.CreateStmt.Relation.Schemaname - tableName := createTableNode.CreateStmt.Relation.Relname - columns := createTableNode.CreateStmt.TableElts - var generatedColumns []string - for _, column := range columns { - //In case CREATE DDL has PRIMARY KEY(column_name) - it will be included in columns but won't have columnDef as its a constraint - if column.GetColumnDef() != nil { - constraints := column.GetColumnDef().Constraints - for _, constraint := range constraints { - if constraint.GetConstraint().Contype == pg_query.ConstrType_CONSTR_GENERATED { - generatedColumns = append(generatedColumns, column.GetColumnDef().Colname) - } - } - } - } - fullyQualifiedName := lo.Ternary(schemaName != "", schemaName+"."+tableName, tableName) - if len(generatedColumns) > 0 { - 
summaryMap["TABLE"].invalidCount[sqlStmtInfo.objName] = true - reportCase(fpath, STORED_GENERATED_COLUMN_ISSUE_REASON+fmt.Sprintf(" Generated Columns: (%s)", strings.Join(generatedColumns, ",")), - "https://github.com/yugabyte/yugabyte-db/issues/10695", - "Using Triggers to update the generated columns is one way to work around this issue, refer docs link for more details.", - TABLE, fullyQualifiedName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, GENERATED_STORED_COLUMN_DOC_LINK) - } -} - // Checks compatibility of views func checkViews(sqlInfoArr []sqlInfo, fpath string) { for _, sqlInfo := range sqlInfoArr { - /*if dropMatViewRegex.MatchString(sqlInfo.stmt) { - reportCase(fpath, "DROP MATERIALIZED VIEW not supported yet.",a - "https://github.com/YugaByte/yugabyte-db/issues/10102", "") - } else if view := matViewRegex.FindStringSubmatch(sqlInfo.stmt); view != nil { - reportCase(fpath, "Schema contains materialized view which is not supported. The view is: "+view[1], - "https://github.com/yugabyte/yugabyte-db/issues/10102", "") - } else */ if view := viewWithCheckRegex.FindStringSubmatch(sqlInfo.stmt); view != nil { summaryMap["VIEW"].invalidCount[sqlInfo.objName] = true reportCase(fpath, VIEW_CHECK_OPTION_ISSUE, "https://github.com/yugabyte/yugabyte-db/issues/22716", - "Use Trigger with INSTEAD OF clause on INSERT/UPDATE on view to get this functionality", "VIEW", view[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, VIEW_CHECK_OPTION_DOC_LINK) + "Use Trigger with INSTEAD OF clause on INSERT/UPDATE on view to get this functionality", "VIEW", view[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, VIEW_CHECK_OPTION_DOC_LINK, "") } } } @@ -1383,39 +374,47 @@ func checkSql(sqlInfoArr []sqlInfo, fpath string) { if rangeRegex.MatchString(sqlInfo.stmt) { reportCase(fpath, "RANGE with offset PRECEDING/FOLLOWING is not supported for column type numeric and offset type double precision", - "https://github.com/yugabyte/yugabyte-db/issues/10692", "", "TABLE", "", 
sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/yugabyte/yugabyte-db/issues/10692", "", "TABLE", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true } else if stmt := fetchRegex.FindStringSubmatch(sqlInfo.stmt); stmt != nil { location := strings.ToUpper(stmt[1]) if slices.Contains(notSupportedFetchLocation, location) { summaryMap["PROCEDURE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "This FETCH clause might not be supported yet", "https://github.com/YugaByte/yugabyte-db/issues/6514", - "Please verify the DDL on your YugabyteDB version before proceeding", "CURSOR", sqlInfo.objName, sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "Please verify the DDL on your YugabyteDB version before proceeding", "CURSOR", sqlInfo.objName, sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } } else if stmt := alterAggRegex.FindStringSubmatch(sqlInfo.stmt); stmt != nil { + summaryMap["AGGREGATE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER AGGREGATE not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/2717", "", "AGGREGATE", stmt[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/2717", "", "AGGREGATE", stmt[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if dropCollRegex.MatchString(sqlInfo.stmt) { + summaryMap["COLLATION"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "DROP multiple objects not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/880", separateMultiObj("DROP COLLATION", sqlInfo.formattedStmt), "COLLATION", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/880", separateMultiObj("DROP COLLATION", sqlInfo.formattedStmt), "COLLATION", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if dropIdxRegex.MatchString(sqlInfo.stmt) { + 
summaryMap["INDEX"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "DROP multiple objects not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/880", separateMultiObj("DROP INDEX", sqlInfo.formattedStmt), "INDEX", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/880", separateMultiObj("DROP INDEX", sqlInfo.formattedStmt), "INDEX", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if dropViewRegex.MatchString(sqlInfo.stmt) { + summaryMap["VIEW"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "DROP multiple objects not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/880", separateMultiObj("DROP VIEW", sqlInfo.formattedStmt), "VIEW", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/880", separateMultiObj("DROP VIEW", sqlInfo.formattedStmt), "VIEW", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if dropSeqRegex.MatchString(sqlInfo.stmt) { + summaryMap["SEQUENCE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "DROP multiple objects not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/880", separateMultiObj("DROP SEQUENCE", sqlInfo.formattedStmt), "SEQUENCE", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/880", separateMultiObj("DROP SEQUENCE", sqlInfo.formattedStmt), "SEQUENCE", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if dropForeignRegex.MatchString(sqlInfo.stmt) { + summaryMap["FOREIGN TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "DROP multiple objects not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/880", separateMultiObj("DROP FOREIGN TABLE", sqlInfo.formattedStmt), "FOREIGN TABLE", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/880", 
separateMultiObj("DROP FOREIGN TABLE", sqlInfo.formattedStmt), "FOREIGN TABLE", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if idx := dropIdxConcurRegex.FindStringSubmatch(sqlInfo.stmt); idx != nil { + summaryMap["INDEX"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "DROP INDEX CONCURRENTLY not supported yet", - "https://github.com/yugabyte/yugabyte-db/issues/22717", "", "INDEX", idx[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/yugabyte/yugabyte-db/issues/22717", "", "INDEX", idx[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if currentOfRegex.MatchString(sqlInfo.stmt) { - reportCase(fpath, "WHERE CURRENT OF not supported yet", "https://github.com/YugaByte/yugabyte-db/issues/737", "", "CURSOR", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + reportCase(fpath, "WHERE CURRENT OF not supported yet", "https://github.com/YugaByte/yugabyte-db/issues/737", "", "CURSOR", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if bulkCollectRegex.MatchString(sqlInfo.stmt) { - reportCase(fpath, "BULK COLLECT keyword of oracle is not converted into PostgreSQL compatible syntax", "https://github.com/yugabyte/yb-voyager/issues/1539", "", "", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + reportCase(fpath, "BULK COLLECT keyword of oracle is not converted into PostgreSQL compatible syntax", "https://github.com/yugabyte/yb-voyager/issues/1539", "", "", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } } } @@ -1427,94 +426,106 @@ func checkDDL(sqlInfoArr []sqlInfo, fpath string, objType string) { if am := amRegex.FindStringSubmatch(sqlInfo.stmt); am != nil { summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "CREATE ACCESS METHOD is not supported.", - "https://github.com/yugabyte/yugabyte-db/issues/10693", "", "ACCESS METHOD", am[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + 
"https://github.com/yugabyte/yugabyte-db/issues/10693", "", "ACCESS METHOD", am[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := idxConcRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "REINDEX is not supported.", - "https://github.com/yugabyte/yugabyte-db/issues/10267", "", "TABLE", tbl[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/yugabyte/yugabyte-db/issues/10267", "", "TABLE", tbl[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := likeAllRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "LIKE ALL is not supported yet.", - "https://github.com/yugabyte/yugabyte-db/issues/10697", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/yugabyte/yugabyte-db/issues/10697", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := likeRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "LIKE clause not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1129", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") - } else if tbl := inheritRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { - summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true - reportCase(fpath, INHERITANCE_ISSUE_REASON, - "https://github.com/YugaByte/yugabyte-db/issues/1129", "", "TABLE", tbl[4], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, INHERITANCE_DOC_LINK) + "https://github.com/YugaByte/yugabyte-db/issues/1129", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := withOidsRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "OIDs are not 
supported for user tables.", - "https://github.com/yugabyte/yugabyte-db/issues/10273", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") - } else if tbl := intvlRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { - summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true - reportCase(fpath, "PRIMARY KEY containing column of type 'INTERVAL' not yet supported.", - "https://github.com/YugaByte/yugabyte-db/issues/1397", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/yugabyte/yugabyte-db/issues/10273", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterOfRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE OF not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterSchemaRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE SET SCHEMA not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/3947", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/3947", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if createSchemaRegex.MatchString(sqlInfo.stmt) { + summaryMap["SCHEMA"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "CREATE SCHEMA with elements not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/10865", "", "SCHEMA", "", sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/10865", "", "SCHEMA", "", sqlInfo.formattedStmt, 
UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterNotOfRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE NOT OF not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterColumnStatsRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE ALTER column SET STATISTICS not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterColumnStorageRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE ALTER column SET STORAGE not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterColumnResetAttributesRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE ALTER column RESET (attribute) not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else 
if tbl := alterConstrRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE ALTER CONSTRAINT not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := setOidsRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE SET WITH OIDS not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[4], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[4], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := withoutClusterRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE SET WITHOUT CLUSTER not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterSetRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE SET not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterIdxRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + 
summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER INDEX SET not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "INDEX", tbl[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "INDEX", tbl[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterResetRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE RESET not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterOptionsRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if typ := dropAttrRegex.FindStringSubmatch(sqlInfo.stmt); typ != nil { + summaryMap["TYPE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TYPE DROP ATTRIBUTE not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1893", "", "TYPE", typ[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1893", "", "TYPE", typ[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if typ := alterTypeRegex.FindStringSubmatch(sqlInfo.stmt); typ != nil { + summaryMap["TYPE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TYPE not supported yet.", - 
"https://github.com/YugaByte/yugabyte-db/issues/1893", "", "TYPE", typ[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1893", "", "TYPE", typ[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := alterInhRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE INHERIT not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := valConstrRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLE VALIDATE CONSTRAINT not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1124", "", "TABLE", tbl[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if spc := alterTblSpcRegex.FindStringSubmatch(sqlInfo.stmt); spc != nil { + summaryMap["TABLESPACE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER TABLESPACE not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1153", "", "TABLESPACE", spc[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/YugaByte/yugabyte-db/issues/1153", "", "TABLESPACE", spc[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if spc := alterViewRegex.FindStringSubmatch(sqlInfo.stmt); spc != nil { + summaryMap["VIEW"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "ALTER VIEW not supported yet.", - "https://github.com/YugaByte/yugabyte-db/issues/1131", "", "VIEW", spc[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + 
"https://github.com/YugaByte/yugabyte-db/issues/1131", "", "VIEW", spc[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := cLangRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { reportCase(fpath, "LANGUAGE C not supported yet.", - "https://github.com/yugabyte/yb-voyager/issues/1540", "", "FUNCTION", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/yugabyte/yb-voyager/issues/1540", "", "FUNCTION", tbl[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") summaryMap["FUNCTION"].invalidCount[sqlInfo.objName] = true } else if strings.Contains(strings.ToLower(sqlInfo.stmt), "drop temporary table") { filePath := strings.Split(fpath, "/") @@ -1522,22 +533,22 @@ func checkDDL(sqlInfoArr []sqlInfo, fpath string, objType string) { objType := strings.ToUpper(strings.Split(fileName, ".")[0]) summaryMap[objType].invalidCount[sqlInfo.objName] = true reportCase(fpath, `temporary table is not a supported clause for drop`, - "https://github.com/yugabyte/yb-voyager/issues/705", `remove "temporary" and change it to "drop table"`, objType, sqlInfo.objName, sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, DROP_TEMP_TABLE_DOC_LINK) + "https://github.com/yugabyte/yb-voyager/issues/705", `remove "temporary" and change it to "drop table"`, objType, sqlInfo.objName, sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, DROP_TEMP_TABLE_DOC_LINK, "") } else if regMatch := anydataRegex.FindStringSubmatch(sqlInfo.stmt); regMatch != nil { summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true - reportCase(fpath, "AnyData datatype doesn't have a mapping in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1541", `Remove the column with AnyData datatype or change it to a relevant supported datatype`, "TABLE", regMatch[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + reportCase(fpath, "AnyData datatype doesn't have a mapping in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1541", `Remove the 
column with AnyData datatype or change it to a relevant supported datatype`, "TABLE", regMatch[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if regMatch := anydatasetRegex.FindStringSubmatch(sqlInfo.stmt); regMatch != nil { summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true - reportCase(fpath, "AnyDataSet datatype doesn't have a mapping in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1541", `Remove the column with AnyDataSet datatype or change it to a relevant supported datatype`, "TABLE", regMatch[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + reportCase(fpath, "AnyDataSet datatype doesn't have a mapping in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1541", `Remove the column with AnyDataSet datatype or change it to a relevant supported datatype`, "TABLE", regMatch[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if regMatch := anyTypeRegex.FindStringSubmatch(sqlInfo.stmt); regMatch != nil { summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true - reportCase(fpath, "AnyType datatype doesn't have a mapping in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1541", `Remove the column with AnyType datatype or change it to a relevant supported datatype`, "TABLE", regMatch[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + reportCase(fpath, "AnyType datatype doesn't have a mapping in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1541", `Remove the column with AnyType datatype or change it to a relevant supported datatype`, "TABLE", regMatch[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if regMatch := uriTypeRegex.FindStringSubmatch(sqlInfo.stmt); regMatch != nil { summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true - reportCase(fpath, "URIType datatype doesn't have a mapping in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1541", `Remove the column with URIType datatype or change it to a relevant 
supported datatype`, "TABLE", regMatch[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + reportCase(fpath, "URIType datatype doesn't have a mapping in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1541", `Remove the column with URIType datatype or change it to a relevant supported datatype`, "TABLE", regMatch[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if regMatch := jsonFuncRegex.FindStringSubmatch(sqlInfo.stmt); regMatch != nil { summaryMap[objType].invalidCount[sqlInfo.objName] = true - reportCase(fpath, "JSON_ARRAYAGG() function is not available in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1542", `Rename the function to YugabyteDB's equivalent JSON_AGG()`, objType, regMatch[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + reportCase(fpath, "JSON_ARRAYAGG() function is not available in YugabyteDB", "https://github.com/yugabyte/yb-voyager/issues/1542", `Rename the function to YugabyteDB's equivalent JSON_AGG()`, objType, regMatch[3], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } } @@ -1548,11 +559,13 @@ func checkForeign(sqlInfoArr []sqlInfo, fpath string) { for _, sqlInfo := range sqlInfoArr { //TODO: refactor it later to remove all the unneccessary regexes if tbl := primRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "Primary key constraints are not supported on foreign tables.", - "https://github.com/yugabyte/yugabyte-db/issues/10698", "", "TABLE", tbl[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/yugabyte/yugabyte-db/issues/10698", "", "TABLE", tbl[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } else if tbl := foreignKeyRegex.FindStringSubmatch(sqlInfo.stmt); tbl != nil { + summaryMap["TABLE"].invalidCount[sqlInfo.objName] = true reportCase(fpath, "Foreign key constraints are not supported on foreign tables.", - 
"https://github.com/yugabyte/yugabyte-db/issues/10699", "", "TABLE", tbl[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/yugabyte/yugabyte-db/issues/10699", "", "TABLE", tbl[1], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") } } } @@ -1562,7 +575,7 @@ func checkRemaining(sqlInfoArr []sqlInfo, fpath string) { for _, sqlInfo := range sqlInfoArr { if trig := compoundTrigRegex.FindStringSubmatch(sqlInfo.stmt); trig != nil { reportCase(fpath, COMPOUND_TRIGGER_ISSUE_REASON, - "https://github.com/yugabyte/yb-voyager/issues/1543", "", "TRIGGER", trig[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, "") + "https://github.com/yugabyte/yb-voyager/issues/1543", "", "TRIGGER", trig[2], sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, "", "") summaryMap["TRIGGER"].invalidCount[sqlInfo.objName] = true } } @@ -1580,6 +593,99 @@ func checker(sqlInfoArr []sqlInfo, fpath string, objType string) { checkForeign(sqlInfoArr, fpath) checkRemaining(sqlInfoArr, fpath) checkStmtsUsingParser(sqlInfoArr, fpath, objType) + if utils.GetEnvAsBool("REPORT_UNSUPPORTED_PLPGSQL_OBJECTS", true) { + checkPlPgSQLStmtsUsingParser(sqlInfoArr, fpath, objType) + } +} + +func checkPlPgSQLStmtsUsingParser(sqlInfoArr []sqlInfo, fpath string, objType string) { + for _, sqlInfoStmt := range sqlInfoArr { + issues, err := parserIssueDetector.GetAllPLPGSQLIssues(sqlInfoStmt.formattedStmt, targetDbVersion) + if err != nil { + log.Infof("error in getting the issues-%s: %v", sqlInfoStmt.formattedStmt, err) + continue + } + for _, issueInstance := range issues { + issue := convertIssueInstanceToAnalyzeIssue(issueInstance, fpath, true) + schemaAnalysisReport.Issues = append(schemaAnalysisReport.Issues, issue) + } + } + +} + +var MigrationCaveatsIssues = []string{ + ADDING_PK_TO_PARTITIONED_TABLE_ISSUE_REASON, + FOREIGN_TABLE_ISSUE_REASON, + POLICY_ROLE_ISSUE, + UNSUPPORTED_DATATYPE_LIVE_MIGRATION, + UNSUPPORTED_DATATYPE_LIVE_MIGRATION_WITH_FF_FB, +} + +func 
convertIssueInstanceToAnalyzeIssue(issueInstance queryissue.QueryIssue, fileName string, isPlPgSQLIssue bool) utils.AnalyzeSchemaIssue { + issueType := UNSUPPORTED_FEATURES_CATEGORY + switch true { + case isPlPgSQLIssue: + issueType = UNSUPPORTED_PLPGSQL_OBJECTS_CATEGORY + case slices.ContainsFunc(MigrationCaveatsIssues, func(i string) bool { + //Adding the MIGRATION_CAVEATS issueType of the utils.Issue for these issueInstances in MigrationCaveatsIssues + return strings.Contains(issueInstance.Name, i) + }): + issueType = MIGRATION_CAVEATS_CATEGORY + case strings.HasPrefix(issueInstance.Name, UNSUPPORTED_DATATYPE): + //Adding the UNSUPPORTED_DATATYPES issueType of the utils.Issue for these issues whose TypeName starts with "Unsupported datatype ..." + issueType = UNSUPPORTED_DATATYPES_CATEGORY + } + + var constraintIssues = []string{ + queryissue.EXCLUSION_CONSTRAINTS, + queryissue.DEFERRABLE_CONSTRAINTS, + queryissue.PK_UK_ON_COMPLEX_DATATYPE, + queryissue.FOREIGN_KEY_REFERENCES_PARTITIONED_TABLE, + } + /* + TODO: + // unsupportedIndexIssue + // ObjectType = INDEX + // ObjectName = idx_name ON table_name + // invalidCount.Type = INDEX + // invalidCount.Name = ObjectName (because this is fully qualified) + // DisplayName = ObjectName + + // deferrableConstraintIssue + // ObjectType = TABLE + // ObjectName = table_name + // invalidCount.Type = TABLE + // invalidCount.Name = ObjectName + // DisplayName = table_name (constraint_name) (!= ObjectName) + + // Solutions + // 1. Define a issue.ObjectDisplayName + // 2. Keep it in issue.Details and write logic in UI layer to construct display name. 
+ */ + displayObjectName := issueInstance.ObjectName + + constraintName, ok := issueInstance.Details[queryissue.CONSTRAINT_NAME] + if slices.Contains(constraintIssues, issueInstance.Type) && ok { + //In case of constraint issues we add constraint name to the object name as well + displayObjectName = fmt.Sprintf("%s, constraint: (%s)", issueInstance.ObjectName, constraintName) + } + + summaryMap[issueInstance.ObjectType].invalidCount[issueInstance.ObjectName] = true + + return utils.AnalyzeSchemaIssue{ + IssueType: issueType, + ObjectType: issueInstance.ObjectType, + ObjectName: displayObjectName, + Reason: issueInstance.Name, + Type: issueInstance.Type, + Impact: issueInstance.Impact, + SqlStatement: issueInstance.SqlStatement, + DocsLink: issueInstance.DocsLink, + FilePath: fileName, + Suggestion: issueInstance.Suggestion, + GH: issueInstance.GH, + MinimumVersionsFixedIn: issueInstance.MinimumVersionsFixedIn, + } } func checkExtensions(sqlInfoArr []sqlInfo, fpath string) { @@ -1587,7 +693,7 @@ func checkExtensions(sqlInfoArr []sqlInfo, fpath string) { if sqlInfo.objName != "" && !slices.Contains(supportedExtensionsOnYB, sqlInfo.objName) { summaryMap["EXTENSION"].invalidCount[sqlInfo.objName] = true reportCase(fpath, UNSUPPORTED_EXTENSION_ISSUE+" Refer to the docs link for the more information on supported extensions.", "https://github.com/yugabyte/yb-voyager/issues/1538", "", "EXTENSION", - sqlInfo.objName, sqlInfo.formattedStmt, UNSUPPORTED_FEATURES, EXTENSION_DOC_LINK) + sqlInfo.objName, sqlInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, EXTENSION_DOC_LINK, constants.IMPACT_LEVEL_3) } if strings.ToLower(sqlInfo.objName) == "hll" { summaryMap["EXTENSION"].details[`'hll' extension is supported in YugabyteDB v2.18 onwards. 
Please verify this extension as per the target YugabyteDB version.`] = true @@ -1705,7 +811,7 @@ func getObjectNameWithTable(stmt string, regexObjName string) string { parsedTree, err := pg_query.Parse(stmt) if err != nil { // in case it is not able to parse stmt as its not in PG syntax so returning the regex name - log.Errorf("Erroring parsing the the stmt %s - %v", stmt, err) + log.Errorf("Error parsing the stmt %s - %v", stmt, err) return regexObjName } var objectName *sqlname.ObjectName @@ -1746,7 +852,7 @@ func parseSqlFileForObjectType(path string, objType string) []sqlInfo { reportNextSql := 0 file, err := os.ReadFile(path) if err != nil { - utils.ErrExit("Error while reading %q: %s", path, err) + utils.ErrExit("Error while reading file: %q: %s", path, err) } lines := strings.Split(string(file), "\n") @@ -1933,11 +1039,12 @@ var funcMap = template.FuncMap{ } return total }, - "split": split, + "split": split, + "getSupportedVersionString": getSupportedVersionString, } // add info to the 'reportStruct' variable and return -func analyzeSchemaInternal(sourceDBConf *srcdb.Source) utils.SchemaReport { +func analyzeSchemaInternal(sourceDBConf *srcdb.Source, detectIssues bool) utils.SchemaReport { /* NOTE: Don't create local var with name 'schemaAnalysisReport' since global one is used across all the internal functions called by analyzeSchemaInternal() @@ -1946,6 +1053,7 @@ func analyzeSchemaInternal(sourceDBConf *srcdb.Source) utils.SchemaReport { schemaAnalysisReport = utils.SchemaReport{} sourceObjList = utils.GetSchemaObjectList(sourceDBConf.DBType) initializeSummaryMap() + for _, objType := range sourceObjList { var sqlInfoArr []sqlInfo filePath := utils.GetObjectFilePath(schemaDir, objType) @@ -1958,22 +1066,31 @@ func analyzeSchemaInternal(sourceDBConf *srcdb.Source) utils.SchemaReport { otherFPaths = utils.GetObjectFilePath(schemaDir, "FTS_INDEX") sqlInfoArr = append(sqlInfoArr, parseSqlFileForObjectType(otherFPaths, "FTS_INDEX")...) 
} - if objType == "EXTENSION" { - checkExtensions(sqlInfoArr, filePath) - } - if objType == "FOREIGN TABLE" { - checkForeignTable(sqlInfoArr, filePath) - } - checker(sqlInfoArr, filePath, objType) + if detectIssues { + if objType == "EXTENSION" { + checkExtensions(sqlInfoArr, filePath) + } + checker(sqlInfoArr, filePath, objType) - if objType == "CONVERSION" { - checkConversions(sqlInfoArr, filePath) + if objType == "CONVERSION" { + checkConversions(sqlInfoArr, filePath) + } + + // Ideally all filtering of issues should happen in queryissue pkg layer, + // but until we move all issue detection logic to queryissue pkg, we will filter issues here as well. + schemaAnalysisReport.Issues = lo.Filter(schemaAnalysisReport.Issues, func(i utils.AnalyzeSchemaIssue, index int) bool { + fixed, err := i.IsFixedIn(targetDbVersion) + if err != nil { + utils.ErrExit("checking if issue %v is supported: %v", i, err) + } + return !fixed + }) } } schemaAnalysisReport.SchemaSummary = reportSchemaSummary(sourceDBConf) schemaAnalysisReport.VoyagerVersion = utils.YB_VOYAGER_VERSION - schemaAnalysisReport.MigrationComplexity = getMigrationComplexity(sourceDBConf.DBType, schemaDir, schemaAnalysisReport) + schemaAnalysisReport.TargetDBVersion = targetDbVersion return schemaAnalysisReport } @@ -1981,7 +1098,7 @@ func checkConversions(sqlInfoArr []sqlInfo, filePath string) { for _, sqlStmtInfo := range sqlInfoArr { parseTree, err := pg_query.Parse(sqlStmtInfo.stmt) if err != nil { - utils.ErrExit("failed to parse the stmt %v: %v", sqlStmtInfo.stmt, err) + utils.ErrExit("failed to parse the stmt: %v: %v", sqlStmtInfo.stmt, err) } createConvNode, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateConversionStmt) @@ -1995,12 +1112,12 @@ func checkConversions(sqlInfoArr []sqlInfo, filePath string) { convName = fmt.Sprintf("%s.%s", convName, nameList[1].GetString_().Sval) } reportCase(filePath, CONVERSION_ISSUE_REASON, "https://github.com/yugabyte/yugabyte-db/issues/10866", - "Remove it from 
the exported schema", "CONVERSION", convName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, CREATE_CONVERSION_DOC_LINK) + "Remove it from the exported schema", "CONVERSION", convName, sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, CREATE_CONVERSION_DOC_LINK, constants.IMPACT_LEVEL_3) } else { //pg_query doesn't seem to have a Node type of AlterConversionStmt so using regex for now if stmt := alterConvRegex.FindStringSubmatch(sqlStmtInfo.stmt); stmt != nil { reportCase(filePath, "ALTER CONVERSION is not supported yet", "https://github.com/YugaByte/yugabyte-db/issues/10866", - "Remove it from the exported schema", "CONVERSION", stmt[1], sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES, CREATE_CONVERSION_DOC_LINK) + "Remove it from the exported schema", "CONVERSION", stmt[1], sqlStmtInfo.formattedStmt, UNSUPPORTED_FEATURES_CATEGORY, CREATE_CONVERSION_DOC_LINK, "") } } @@ -2008,11 +1125,8 @@ func checkConversions(sqlInfoArr []sqlInfo, filePath string) { } func analyzeSchema() { - err := retrieveMigrationUUID() - if err != nil { - utils.ErrExit("failed to get migration UUID: %w", err) - } + utils.PrintAndLog("Analyzing schema for target YugabyteDB version %s\n", targetDbVersion) schemaAnalysisStartedEvent := createSchemaAnalysisStartedEvent() controlPlane.SchemaAnalysisStarted(&schemaAnalysisStartedEvent) @@ -2024,7 +1138,7 @@ func analyzeSchema() { if err != nil { utils.ErrExit("analyze schema : load migration status record: %s", err) } - analyzeSchemaInternal(msr.SourceDBConf) + analyzeSchemaInternal(msr.SourceDBConf, true) if analyzeSchemaReportFormat != "" { generateAnalyzeSchemaReport(msr, analyzeSchemaReportFormat) @@ -2033,7 +1147,7 @@ func analyzeSchema() { generateAnalyzeSchemaReport(msr, JSON) } - packAndSendAnalyzeSchemaPayload(COMPLETE) + packAndSendAnalyzeSchemaPayload(COMPLETE, "") schemaAnalysisReport := createSchemaAnalysisIterationCompletedEvent(schemaAnalysisReport) controlPlane.SchemaAnalysisIterationCompleted(&schemaAnalysisReport) @@ 
-2084,7 +1198,7 @@ func generateAnalyzeSchemaReport(msr *metadb.MigrationStatusRecord, reportFormat file, err := os.OpenFile(reportPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { - utils.ErrExit("Error while opening %q: %s", reportPath, err) + utils.ErrExit("Error while opening: %q: %s", reportPath, err) } defer func() { if err := file.Close(); err != nil { @@ -2094,13 +1208,14 @@ func generateAnalyzeSchemaReport(msr *metadb.MigrationStatusRecord, reportFormat _, err = file.WriteString(finalReport) if err != nil { - utils.ErrExit("failed to write report to %q: %s", reportPath, err) + utils.ErrExit("failed to write report to: %q: %s", reportPath, err) } fmt.Printf("-- find schema analysis report at: %s\n", reportPath) return nil } -var reasonsIncludingSensitiveInformation = []string{ +// analyze issue reasons to modify the reason before sending to callhome as will have sensitive information +var reasonsIncludingSensitiveInformationToCallhome = []string{ UNSUPPORTED_PG_SYNTAX, POLICY_ROLE_ISSUE, UNSUPPORTED_DATATYPE, @@ -2110,18 +1225,27 @@ var reasonsIncludingSensitiveInformation = []string{ INSUFFICIENT_COLUMNS_IN_PK_FOR_PARTITION, } -func packAndSendAnalyzeSchemaPayload(status string) { +// analyze issue reasons to send the object names for to callhome +var reasonsToSendObjectNameToCallhome = []string{ + UNSUPPORTED_EXTENSION_ISSUE, +} + +func packAndSendAnalyzeSchemaPayload(status string, errorMsg string) { if !shouldSendCallhome() { return } payload := createCallhomePayload() payload.MigrationPhase = ANALYZE_PHASE - var callhomeIssues []utils.Issue + var callhomeIssues []utils.AnalyzeSchemaIssue for _, issue := range schemaAnalysisReport.Issues { - issue.SqlStatement = "" // Obfuscate sensitive information before sending to callhome cluster - issue.ObjectName = "XXX" // Redacting object name before sending - for _, sensitiveReason := range reasonsIncludingSensitiveInformation { + issue.SqlStatement = "" // Obfuscate sensitive information before 
sending to callhome cluster + if !lo.ContainsBy(reasonsToSendObjectNameToCallhome, func(r string) bool { + return strings.Contains(issue.Reason, r) + }) { + issue.ObjectName = "XXX" // Redacting object name before sending in case reason is not in list + } + for _, sensitiveReason := range reasonsIncludingSensitiveInformationToCallhome { if strings.Contains(issue.Reason, sensitiveReason) { switch sensitiveReason { case UNSUPPORTED_DATATYPE, UNSUPPORTED_DATATYPE_LIVE_MIGRATION: @@ -2139,11 +1263,13 @@ func packAndSendAnalyzeSchemaPayload(status string) { } analyzePayload := callhome.AnalyzePhasePayload{ - Issues: callhome.MarshalledJsonString(callhomeIssues), + TargetDBVersion: schemaAnalysisReport.TargetDBVersion, + Issues: callhome.MarshalledJsonString(callhomeIssues), DatabaseObjects: callhome.MarshalledJsonString(lo.Map(schemaAnalysisReport.SchemaSummary.DBObjects, func(dbObject utils.DBObject, _ int) utils.DBObject { dbObject.ObjectNames = "" return dbObject })), + Error: callhome.SanitizeErrorMsg(errorMsg), } payload.PhasePayload = callhome.MarshalledJsonString(analyzePayload) payload.Status = status @@ -2160,8 +1286,16 @@ var analyzeSchemaCmd = &cobra.Command{ "For more details and examples, visit https://docs.yugabyte.com/preview/yugabyte-voyager/reference/schema-migration/analyze-schema/", Long: ``, PreRun: func(cmd *cobra.Command, args []string) { + err := retrieveMigrationUUID() + if err != nil { + utils.ErrExit("failed to get migration UUID: %w", err) + } validOutputFormats := []string{"html", "json", "txt", "xml"} validateReportOutputFormat(validOutputFormats, analyzeSchemaReportFormat) + err = validateAndSetTargetDbVersionFlag() + if err != nil { + utils.ErrExit("%v", err) + } }, Run: func(cmd *cobra.Command, args []string) { @@ -2174,6 +1308,9 @@ func init() { registerCommonGlobalFlags(analyzeSchemaCmd) analyzeSchemaCmd.PersistentFlags().StringVar(&analyzeSchemaReportFormat, "output-format", "", "format in which report can be generated: ('html', 'txt', 
'json', 'xml'). If not provided, reports will be generated in both 'json' and 'html' formats by default.") + + analyzeSchemaCmd.Flags().StringVar(&targetDbVersionStrFlag, "target-db-version", "", + fmt.Sprintf("Target YugabyteDB version to analyze schema for (in format A.B.C.D). Defaults to latest stable version (%s)", ybversion.LatestStable.String())) } func validateReportOutputFormat(validOutputFormats []string, format string) { diff --git a/yb-voyager/cmd/analyzeSchema_test.go b/yb-voyager/cmd/analyzeSchema_test.go index 1b97697d31..d768b94b06 100644 --- a/yb-voyager/cmd/analyzeSchema_test.go +++ b/yb-voyager/cmd/analyzeSchema_test.go @@ -1,3 +1,5 @@ +//go:build unit + /* Copyright (c) YugabyteDB, Inc. diff --git a/yb-voyager/cmd/archiveChangesCommand.go b/yb-voyager/cmd/archiveChangesCommand.go index 945c69ee9a..c47f5f9389 100644 --- a/yb-voyager/cmd/archiveChangesCommand.go +++ b/yb-voyager/cmd/archiveChangesCommand.go @@ -55,6 +55,15 @@ func archiveChangesCommandFn(cmd *cobra.Command, args []string) { utils.ErrExit("one of the --move-to and --delete-changes-without-archiving must be set") } + // Check to ensure that export data with live migration is running + msr, err := metaDB.GetMigrationStatusRecord() + if err != nil { + utils.ErrExit("Error getting migration status record: %v", err) + } + if !msr.ExportDataSourceDebeziumStarted { + utils.ErrExit("The streaming phase of export data has not started yet. 
This command can only be run after the streaming phase begins.") + } + metaDB.UpdateMigrationStatusRecord(func(record *metadb.MigrationStatusRecord) { record.ArchivingEnabled = true }) diff --git a/yb-voyager/cmd/archiveCommand.go b/yb-voyager/cmd/archiveCommand.go index 8fac243a68..5dfec23227 100644 --- a/yb-voyager/cmd/archiveCommand.go +++ b/yb-voyager/cmd/archiveCommand.go @@ -59,12 +59,12 @@ func validateCommonArchiveFlags() { func validateMoveToFlag() { if moveDestination != "" { if !utils.FileOrFolderExists(moveDestination) { - utils.ErrExit("move destination %q doesn't exists.\n", moveDestination) + utils.ErrExit("move destination doesn't exist: %q\n", moveDestination) } else { var err error moveDestination, err = filepath.Abs(moveDestination) if err != nil { - utils.ErrExit("Failed to get absolute path for move destination %q: %v\n", moveDestination, err) + utils.ErrExit("Failed to get absolute path for move destination: %q: %v\n", moveDestination, err) } moveDestination = filepath.Clean(moveDestination) fmt.Printf("Note: Using %q as move destination\n", moveDestination) diff --git a/yb-voyager/cmd/assessMigrationBulkCommand.go b/yb-voyager/cmd/assessMigrationBulkCommand.go index fb44192f8f..a4973a7e93 100644 --- a/yb-voyager/cmd/assessMigrationBulkCommand.go +++ b/yb-voyager/cmd/assessMigrationBulkCommand.go @@ -48,23 +48,26 @@ var assessMigrationBulkCmd = &cobra.Command{ Long: "Bulk Assessment of multiple schemas across one or more Oracle database instances", PreRun: func(cmd *cobra.Command, args []string) { - err := validateFleetConfigFile(fleetConfigPath) + err := retrieveMigrationUUID() if err != nil { - utils.ErrExit("%s", err.Error()) + utils.ErrExit("failed to get migration UUID: %w", err) + } + err = validateFleetConfigFile(fleetConfigPath) + if err != nil { + utils.ErrExit("validating fleet config file: %s", err.Error()) } }, Run: func(cmd *cobra.Command, args []string) { err := assessMigrationBulk() if err != nil { - 
packAndSendAssessMigrationBulkPayload(ERROR) utils.ErrExit("failed assess migration bulk: %s", err) } - packAndSendAssessMigrationBulkPayload(COMPLETE) + packAndSendAssessMigrationBulkPayload(COMPLETE, "") }, } -func packAndSendAssessMigrationBulkPayload(status string) { +func packAndSendAssessMigrationBulkPayload(status string, errorMsg string) { if !shouldSendCallhome() { return } @@ -77,6 +80,7 @@ func packAndSendAssessMigrationBulkPayload(status string) { } assessMigBulkPayload := callhome.AssessMigrationBulkPhasePayload{ FleetConfigCount: len(bulkAssessmentDBConfigs), + Error: callhome.SanitizeErrorMsg(errorMsg), } payload.PhasePayload = callhome.MarshalledJsonString(assessMigBulkPayload) @@ -156,11 +160,6 @@ func assessMigrationBulk() error { return fmt.Errorf("failed to parse fleet config file: %w", err) } - err = retrieveMigrationUUID() - if err != nil { - return fmt.Errorf("failed to get migration UUID: %w", err) - } - for _, dbConfig := range bulkAssessmentDBConfigs { utils.PrintAndLog("\nAssessing '%s' schema", dbConfig.GetSchemaIdentifier()) @@ -458,7 +457,7 @@ func validateBulkAssessmentDirFlag() { utils.ErrExit(`ERROR: required flag "bulk-assessment-dir" not set`) } if !utils.FileOrFolderExists(bulkAssessmentDir) { - utils.ErrExit("bulk-assessment-dir %q doesn't exists.\n", bulkAssessmentDir) + utils.ErrExit("bulk-assessment-dir doesn't exist: %q\n", bulkAssessmentDir) } else { if bulkAssessmentDir == "." 
{ fmt.Println("Note: Using current directory as bulk-assessment-dir") @@ -466,7 +465,7 @@ func validateBulkAssessmentDirFlag() { var err error bulkAssessmentDir, err = filepath.Abs(bulkAssessmentDir) if err != nil { - utils.ErrExit("Failed to get absolute path for bulk-assessment-dir %q: %v\n", exportDir, err) + utils.ErrExit("Failed to get absolute path for bulk-assessment-dir: %q: %v\n", bulkAssessmentDir, err) } bulkAssessmentDir = filepath.Clean(bulkAssessmentDir) } diff --git a/yb-voyager/cmd/assessMigrationBulkCommand_test.go b/yb-voyager/cmd/assessMigrationBulkCommand_test.go index cfb8b3436f..5b887c901a 100644 --- a/yb-voyager/cmd/assessMigrationBulkCommand_test.go +++ b/yb-voyager/cmd/assessMigrationBulkCommand_test.go @@ -1,3 +1,5 @@ +//go:build unit + /* Copyright (c) YugabyteDB, Inc. diff --git a/yb-voyager/cmd/assessMigrationCommand.go b/yb-voyager/cmd/assessMigrationCommand.go index d3e453b23a..e2636fd57e 100644 --- a/yb-voyager/cmd/assessMigrationCommand.go +++ b/yb-voyager/cmd/assessMigrationCommand.go @@ -21,6 +21,7 @@ import ( _ "embed" "encoding/csv" "encoding/json" + "errors" "fmt" "os" "os/exec" @@ -37,12 +38,15 @@ import ( "golang.org/x/exp/slices" "github.com/yugabyte/yb-voyager/yb-voyager/src/callhome" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" "github.com/yugabyte/yb-voyager/yb-voyager/src/cp" "github.com/yugabyte/yb-voyager/yb-voyager/src/metadb" "github.com/yugabyte/yb-voyager/yb-voyager/src/migassessment" - "github.com/yugabyte/yb-voyager/yb-voyager/src/queryissue" + "github.com/yugabyte/yb-voyager/yb-voyager/src/query/queryissue" + "github.com/yugabyte/yb-voyager/yb-voyager/src/query/queryparser" "github.com/yugabyte/yb-voyager/yb-voyager/src/srcdb" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" ) var ( @@ -53,7 +57,9 @@ var ( intervalForCapturingIOPS int64 assessMigrationSupportedDBTypes = []string{POSTGRESQL, ORACLE} referenceOrTablePartitionPresent = false + 
pgssEnabledForAssessment = false ) + var sourceConnectionFlags = []string{ "source-db-host", "source-db-password", @@ -74,12 +80,21 @@ var assessMigrationCmd = &cobra.Command{ Long: fmt.Sprintf("Assess the migration from source (%s) database to YugabyteDB.", strings.Join(assessMigrationSupportedDBTypes, ", ")), PreRun: func(cmd *cobra.Command, args []string) { + CreateMigrationProjectIfNotExists(source.DBType, exportDir) + err := retrieveMigrationUUID() + if err != nil { + utils.ErrExit("failed to get migration UUID: %w", err) + } validateSourceDBTypeForAssessMigration() setExportFlagsDefaults() validateSourceSchema() validatePortRange() validateSSLMode() validateOracleParams() + err = validateAndSetTargetDbVersionFlag() + if err != nil { + utils.ErrExit("%v", err) + } if cmd.Flags().Changed("assessment-metadata-dir") { validateAssessmentMetadataDirFlag() for _, f := range sourceConnectionFlags { @@ -105,6 +120,9 @@ var assessMigrationCmd = &cobra.Command{ }, } +// Assessment feature names to send the object names for to callhome +var featuresToSendObjectsToCallhome = []string{} + func packAndSendAssessMigrationPayload(status string, errMsg string) { if !shouldSendCallhome() { return @@ -149,30 +167,62 @@ func packAndSendAssessMigrationPayload(status string, errMsg string) { return len(constructs) }) - assessPayload := callhome.AssessMigrationPhasePayload{ - MigrationComplexity: assessmentReport.MigrationComplexity, - UnsupportedFeatures: callhome.MarshalledJsonString(lo.Map(assessmentReport.UnsupportedFeatures, func(feature UnsupportedFeature, _ int) callhome.UnsupportedFeature { - return callhome.UnsupportedFeature{ - FeatureName: feature.FeatureName, - ObjectCount: len(feature.Objects), + var unsupportedFeatures []callhome.UnsupportedFeature + for _, feature := range assessmentReport.UnsupportedFeatures { + if feature.FeatureName == EXTENSION_FEATURE { + // For extensions, we need to send the extension name in the feature name + // for better categorization in 
callhome + for _, object := range feature.Objects { + unsupportedFeatures = append(unsupportedFeatures, callhome.UnsupportedFeature{ + FeatureName: fmt.Sprintf("%s - %s", feature.FeatureName, object.ObjectName), + ObjectCount: 1, + TotalOccurrences: 1, + }) } - })), + } else { + var objects []string + if slices.Contains(featuresToSendObjectsToCallhome, feature.FeatureName) { + objects = lo.Map(feature.Objects, func(o ObjectInfo, _ int) string { + return o.ObjectName + }) + } + unsupportedFeatures = append(unsupportedFeatures, callhome.UnsupportedFeature{ + FeatureName: feature.FeatureName, + ObjectCount: len(feature.Objects), + Objects: objects, + TotalOccurrences: len(feature.Objects), + }) + } + } + + assessPayload := callhome.AssessMigrationPhasePayload{ + TargetDBVersion: assessmentReport.TargetDBVersion, + MigrationComplexity: assessmentReport.MigrationComplexity, + UnsupportedFeatures: callhome.MarshalledJsonString(unsupportedFeatures), UnsupportedQueryConstructs: callhome.MarshalledJsonString(countByConstructType), UnsupportedDatatypes: callhome.MarshalledJsonString(unsupportedDatatypesList), MigrationCaveats: callhome.MarshalledJsonString(lo.Map(assessmentReport.MigrationCaveats, func(feature UnsupportedFeature, _ int) callhome.UnsupportedFeature { return callhome.UnsupportedFeature{ - FeatureName: feature.FeatureName, - ObjectCount: len(feature.Objects), + FeatureName: feature.FeatureName, + ObjectCount: len(feature.Objects), + TotalOccurrences: len(feature.Objects), + } + })), + UnsupportedPlPgSqlObjects: callhome.MarshalledJsonString(lo.Map(assessmentReport.UnsupportedPlPgSqlObjects, func(plpgsql UnsupportedFeature, _ int) callhome.UnsupportedFeature { + groupedObjects := groupByObjectName(plpgsql.Objects) + return callhome.UnsupportedFeature{ + FeatureName: plpgsql.FeatureName, + ObjectCount: len(lo.Keys(groupedObjects)), + TotalOccurrences: len(plpgsql.Objects), } })), TableSizingStats: callhome.MarshalledJsonString(tableSizingStats), IndexSizingStats: 
callhome.MarshalledJsonString(indexSizingStats), SchemaSummary: callhome.MarshalledJsonString(schemaSummaryCopy), IopsInterval: intervalForCapturingIOPS, + Error: callhome.SanitizeErrorMsg(errMsg), } - if status == ERROR { - assessPayload.Error = "ERROR" // removing error for now, TODO to see if we want to keep it - } + if assessmentMetadataDirFlag == "" { sourceDBDetails := callhome.SourceDBDetails{ DBType: source.DBType, @@ -261,6 +311,9 @@ func init() { "Interval (in seconds) at which voyager will gather IOPS metadata from source database for the given schema(s). (only valid for PostgreSQL)") BoolVar(assessMigrationCmd.Flags(), &source.RunGuardrailsChecks, "run-guardrails-checks", true, "run guardrails checks before assess migration. (only valid for PostgreSQL)") + + assessMigrationCmd.Flags().StringVar(&targetDbVersionStrFlag, "target-db-version", "", + fmt.Sprintf("Target YugabyteDB version to assess migration for (in format A.B.C.D). Defaults to latest stable version (%s)", ybversion.LatestStable.String())) } func assessMigration() (err error) { @@ -270,12 +323,7 @@ func assessMigration() (err error) { schemaDir = filepath.Join(assessmentMetadataDir, "schema") checkStartCleanForAssessMigration(assessmentMetadataDirFlag != "") - CreateMigrationProjectIfNotExists(source.DBType, exportDir) - - err = retrieveMigrationUUID() - if err != nil { - return fmt.Errorf("failed to get migration UUID: %w", err) - } + utils.PrintAndLog("Assessing for migration to target YugabyteDB version %s\n", targetDbVersion) assessmentDir := filepath.Join(exportDir, "assessment") migassessment.AssessmentDir = assessmentDir @@ -297,6 +345,14 @@ func assessMigration() (err error) { // We will require source db connection for the below checks // Check if required binaries are installed. if source.RunGuardrailsChecks { + // Check source database version. 
+ log.Info("checking source DB version") + err = source.DB().CheckSourceDBVersion(exportType) + if err != nil { + return fmt.Errorf("source DB version check failed: %w", err) + } + + // Check if required binaries are installed. binaryCheckIssues, err := checkDependenciesForExport() if err != nil { return fmt.Errorf("failed to check dependencies for assess migration: %w", err) @@ -312,7 +368,9 @@ func assessMigration() (err error) { // Check if source db has permissions to assess migration if source.RunGuardrailsChecks { - missingPerms, err := source.DB().GetMissingExportSchemaPermissions() + checkIfSchemasHaveUsagePermissions() + var missingPerms []string + missingPerms, pgssEnabledForAssessment, err = source.DB().GetMissingAssessMigrationPermissions() if err != nil { return fmt.Errorf("failed to get missing assess migration permissions: %w", err) } @@ -363,6 +421,8 @@ func assessMigration() (err error) { return fmt.Errorf("failed to generate assessment report: %w", err) } + log.Infof("number of assessment issues detected: %d\n", len(assessmentReport.Issues)) + utils.PrintAndLog("Migration assessment completed successfully.") completedEvent := createMigrationAssessmentCompletedEvent() controlPlane.MigrationAssessmentCompleted(completedEvent) @@ -437,6 +497,7 @@ func createMigrationAssessmentCompletedEvent() *cp.MigrationAssessmentCompletedE payload := AssessMigrationPayload{ PayloadVersion: ASSESS_MIGRATION_PAYLOAD_VERSION, VoyagerVersion: assessmentReport.VoyagerVersion, + TargetDBVersion: assessmentReport.TargetDBVersion, MigrationComplexity: assessmentReport.MigrationComplexity, SchemaSummary: assessmentReport.SchemaSummary, AssessmentIssues: assessmentIssues, @@ -465,8 +526,8 @@ func createMigrationAssessmentCompletedEvent() *cp.MigrationAssessmentCompletedE } // flatten UnsupportedDataTypes, UnsupportedFeatures, MigrationCaveats -func flattenAssessmentReportToAssessmentIssues(ar AssessmentReport) []AssessmentIssuePayload { - var issues []AssessmentIssuePayload 
+func flattenAssessmentReportToAssessmentIssues(ar AssessmentReport) []AssessmentIssueYugabyteD { + var issues []AssessmentIssueYugabyteD var dataTypesDocsLink string switch source.DBType { @@ -476,9 +537,9 @@ func flattenAssessmentReportToAssessmentIssues(ar AssessmentReport) []Assessment dataTypesDocsLink = UNSUPPORTED_DATATYPES_DOC_LINK_ORACLE } for _, unsupportedDataType := range ar.UnsupportedDataTypes { - issues = append(issues, AssessmentIssuePayload{ - Type: DATATYPE, - TypeDescription: DATATYPE_ISSUE_TYPE_DESCRIPTION, + issues = append(issues, AssessmentIssueYugabyteD{ + Type: constants.DATATYPE, + TypeDescription: GetCategoryDescription(constants.DATATYPE), Subtype: unsupportedDataType.DataType, ObjectName: fmt.Sprintf("%s.%s.%s", unsupportedDataType.SchemaName, unsupportedDataType.TableName, unsupportedDataType.ColumnName), SqlStatement: "", @@ -488,42 +549,59 @@ func flattenAssessmentReportToAssessmentIssues(ar AssessmentReport) []Assessment for _, unsupportedFeature := range ar.UnsupportedFeatures { for _, object := range unsupportedFeature.Objects { - issues = append(issues, AssessmentIssuePayload{ - Type: FEATURE, - TypeDescription: FEATURE_ISSUE_TYPE_DESCRIPTION, - Subtype: unsupportedFeature.FeatureName, - SubtypeDescription: unsupportedFeature.FeatureDescription, // TODO: test payload once we add desc for unsupported features - ObjectName: object.ObjectName, - SqlStatement: object.SqlStatement, - DocsLink: unsupportedFeature.DocsLink, + issues = append(issues, AssessmentIssueYugabyteD{ + Type: constants.FEATURE, + TypeDescription: GetCategoryDescription(constants.FEATURE), + Subtype: unsupportedFeature.FeatureName, + SubtypeDescription: unsupportedFeature.FeatureDescription, // TODO: test payload once we add desc for unsupported features + ObjectName: object.ObjectName, + SqlStatement: object.SqlStatement, + DocsLink: unsupportedFeature.DocsLink, + MinimumVersionsFixedIn: unsupportedFeature.MinimumVersionsFixedIn, }) } } for _, migrationCaveat := 
range ar.MigrationCaveats { for _, object := range migrationCaveat.Objects { - issues = append(issues, AssessmentIssuePayload{ - Type: MIGRATION_CAVEATS, - TypeDescription: MIGRATION_CAVEATS_TYPE_DESCRIPTION, - Subtype: migrationCaveat.FeatureName, - SubtypeDescription: migrationCaveat.FeatureDescription, - ObjectName: object.ObjectName, - SqlStatement: object.SqlStatement, - DocsLink: migrationCaveat.DocsLink, + issues = append(issues, AssessmentIssueYugabyteD{ + Type: constants.MIGRATION_CAVEATS, + TypeDescription: GetCategoryDescription(constants.MIGRATION_CAVEATS), + Subtype: migrationCaveat.FeatureName, + SubtypeDescription: migrationCaveat.FeatureDescription, + ObjectName: object.ObjectName, + SqlStatement: object.SqlStatement, + DocsLink: migrationCaveat.DocsLink, + MinimumVersionsFixedIn: migrationCaveat.MinimumVersionsFixedIn, }) } } for _, uqc := range ar.UnsupportedQueryConstructs { - issues = append(issues, AssessmentIssuePayload{ - Type: QUERY_CONSTRUCT, - TypeDescription: UNSUPPORTED_QUERY_CONSTRUTS_DESCRIPTION, - Subtype: uqc.ConstructTypeName, - SqlStatement: uqc.Query, - DocsLink: uqc.DocsLink, + issues = append(issues, AssessmentIssueYugabyteD{ + Type: constants.QUERY_CONSTRUCT, + TypeDescription: GetCategoryDescription(constants.QUERY_CONSTRUCT), + Subtype: uqc.ConstructTypeName, + SqlStatement: uqc.Query, + DocsLink: uqc.DocsLink, + MinimumVersionsFixedIn: uqc.MinimumVersionsFixedIn, }) } + for _, plpgsqlObjects := range ar.UnsupportedPlPgSqlObjects { + for _, object := range plpgsqlObjects.Objects { + issues = append(issues, AssessmentIssueYugabyteD{ + Type: constants.PLPGSQL_OBJECT, + TypeDescription: GetCategoryDescription(constants.PLPGSQL_OBJECT), + Subtype: plpgsqlObjects.FeatureName, + SubtypeDescription: plpgsqlObjects.FeatureDescription, + ObjectName: object.ObjectName, + SqlStatement: object.SqlStatement, + DocsLink: plpgsqlObjects.DocsLink, + MinimumVersionsFixedIn: plpgsqlObjects.MinimumVersionsFixedIn, + }) + } + } return issues } 
@@ -569,7 +647,7 @@ func checkStartCleanForAssessMigration(metadataDirPassedByUser bool) { utils.ErrExit("failed to start clean: %v", err) } } else { - utils.ErrExit("assessment metadata or reports files already exist in the assessment directory at '%s'. Use the --start-clean flag to clear the directory before proceeding.", assessmentDir) + utils.ErrExit("assessment metadata or reports files already exist in the assessment directory: '%s'. Use the --start-clean flag to clear the directory before proceeding.", assessmentDir) } } } @@ -634,8 +712,9 @@ func gatherAssessmentMetadataFromPG() (err error) { if err != nil { return err } + return runGatherAssessmentMetadataScript(scriptPath, []string{fmt.Sprintf("PGPASSWORD=%s", source.Password)}, - source.DB().GetConnectionUriWithoutPassword(), source.Schema, assessmentMetadataDir, fmt.Sprintf("%d", intervalForCapturingIOPS)) + source.DB().GetConnectionUriWithoutPassword(), source.Schema, assessmentMetadataDir, fmt.Sprintf("%t", pgssEnabledForAssessment), fmt.Sprintf("%d", intervalForCapturingIOPS)) } func findGatherMetadataScriptPath(dbType string) (string, error) { @@ -782,6 +861,9 @@ var bytesTemplate []byte func generateAssessmentReport() (err error) { utils.PrintAndLog("Generating assessment report...") + assessmentReport.VoyagerVersion = utils.YB_VOYAGER_VERSION + assessmentReport.TargetDBVersion = targetDbVersion + err = getAssessmentReportContentFromAnalyzeSchema() if err != nil { return fmt.Errorf("failed to generate assessment report content from analyze schema: %w", err) @@ -801,13 +883,19 @@ func generateAssessmentReport() (err error) { assessmentReport.UnsupportedQueryConstructs = unsupportedQueries } - assessmentReport.VoyagerVersion = utils.YB_VOYAGER_VERSION unsupportedDataTypes, unsupportedDataTypesForLiveMigration, unsupportedDataTypesForLiveMigrationWithFForFB, err := fetchColumnsWithUnsupportedDataTypes() if err != nil { return fmt.Errorf("failed to fetch columns with unsupported data types: %w", err) } 
assessmentReport.UnsupportedDataTypes = unsupportedDataTypes - assessmentReport.UnsupportedDataTypesDesc = DATATYPE_ISSUE_TYPE_DESCRIPTION + assessmentReport.UnsupportedDataTypesDesc = DATATYPE_CATEGORY_DESCRIPTION + + assessmentReport.AppendIssues(getAssessmentIssuesForUnsupportedDatatypes(unsupportedDataTypes)...) + + addMigrationCaveatsToAssessmentReport(unsupportedDataTypesForLiveMigration, unsupportedDataTypesForLiveMigrationWithFForFB) + + // calculating migration complexity after collecting all assessment issues + assessmentReport.MigrationComplexity = calculateMigrationComplexity(source.DBType, schemaDir, assessmentReport) assessmentReport.Sizing = migassessment.SizingReport assessmentReport.TableIndexStats, err = assessmentDB.FetchAllStats() @@ -816,7 +904,6 @@ func generateAssessmentReport() (err error) { } addNotesToAssessmentReport() - addMigrationCaveatsToAssessmentReport(unsupportedDataTypesForLiveMigration, unsupportedDataTypesForLiveMigrationWithFForFB) postProcessingOfAssessmentReport() assessmentReportDir := filepath.Join(exportDir, "assessment", "reports") @@ -833,15 +920,16 @@ func generateAssessmentReport() (err error) { } func getAssessmentReportContentFromAnalyzeSchema() error { - schemaAnalysisReport := analyzeSchemaInternal(&source) - assessmentReport.MigrationComplexity = schemaAnalysisReport.MigrationComplexity + /* + Here we are generating analyze schema report which converts issue instance to analyze schema issue + Then in assessment codepath we extract the required information from analyze schema issue which could have been done directly from issue instance(TODO) + + But current Limitation is analyze schema currently uses regexp etc to detect some issues(not using parser). 
+ */ + schemaAnalysisReport := analyzeSchemaInternal(&source, true) assessmentReport.SchemaSummary = schemaAnalysisReport.SchemaSummary - assessmentReport.SchemaSummary.Description = SCHEMA_SUMMARY_DESCRIPTION - if source.DBType == ORACLE { - assessmentReport.SchemaSummary.Description = SCHEMA_SUMMARY_DESCRIPTION_ORACLE - } + assessmentReport.SchemaSummary.Description = lo.Ternary(source.DBType == ORACLE, SCHEMA_SUMMARY_DESCRIPTION_ORACLE, SCHEMA_SUMMARY_DESCRIPTION) - // fetching unsupportedFeaturing with the help of Issues report in SchemaReport var unsupportedFeatures []UnsupportedFeature var err error switch source.DBType { @@ -853,61 +941,144 @@ func getAssessmentReportContentFromAnalyzeSchema() error { panic(fmt.Sprintf("unsupported source db type %q", source.DBType)) } if err != nil { - return fmt.Errorf("failed to fetch %s unsupported features: %w", source.DBType, err) + return fmt.Errorf("failed to fetch '%s' unsupported features: %w", source.DBType, err) } assessmentReport.UnsupportedFeatures = append(assessmentReport.UnsupportedFeatures, unsupportedFeatures...) - assessmentReport.UnsupportedFeaturesDesc = FEATURE_ISSUE_TYPE_DESCRIPTION + assessmentReport.UnsupportedFeaturesDesc = FEATURE_CATEGORY_DESCRIPTION + + // Ques: Do we still need this and REPORT_UNSUPPORTED_QUERY_CONSTRUCTS env var + if utils.GetEnvAsBool("REPORT_UNSUPPORTED_PLPGSQL_OBJECTS", true) { + assessmentReport.UnsupportedPlPgSqlObjects = fetchUnsupportedPlPgSQLObjects(schemaAnalysisReport) + } return nil } -func getUnsupportedFeaturesFromSchemaAnalysisReport(featureName string, issueReason string, schemaAnalysisReport utils.SchemaReport, displayDDLInHTML bool, description string) UnsupportedFeature { +// when we group multiple Issue instances into a single bucket of UnsupportedFeature. +// Ideally, all the issues in the same bucket should have the same minimum version fixed in. +// We want to validate that and fail if not. 
+func areMinVersionsFixedInEqual(m1 map[string]*ybversion.YBVersion, m2 map[string]*ybversion.YBVersion) bool { + if m1 == nil && m2 == nil { + return true + } + if m1 == nil || m2 == nil { + return false + } + + if len(m1) != len(m2) { + return false + } + for k, v := range m1 { + if m2[k] == nil || !m2[k].Equal(v) { + return false + } + } + return true +} + +func getUnsupportedFeaturesFromSchemaAnalysisReport(featureName string, issueReason string, issueType string, schemaAnalysisReport utils.SchemaReport, displayDDLInHTML bool, description string) UnsupportedFeature { log.Info("filtering issues for feature: ", featureName) objects := make([]ObjectInfo, 0) link := "" // for oracle we shouldn't display any line for links - for _, issue := range schemaAnalysisReport.Issues { - if strings.Contains(issue.Reason, issueReason) { + var minVersionsFixedIn map[string]*ybversion.YBVersion + var minVersionsFixedInSet bool + + for _, analyzeIssue := range schemaAnalysisReport.Issues { + if !slices.Contains([]string{UNSUPPORTED_FEATURES_CATEGORY, MIGRATION_CAVEATS_CATEGORY}, analyzeIssue.IssueType) { + continue + } + issueMatched := lo.Ternary[bool](issueType != "", issueType == analyzeIssue.Type, strings.Contains(analyzeIssue.Reason, issueReason)) + + if issueMatched { + if !minVersionsFixedInSet { + minVersionsFixedIn = analyzeIssue.MinimumVersionsFixedIn + minVersionsFixedInSet = true + } + if !areMinVersionsFixedInEqual(minVersionsFixedIn, analyzeIssue.MinimumVersionsFixedIn) { + utils.ErrExit("Issues belonging to UnsupportedFeature %s have different minimum versions fixed in: %v, %v", featureName, minVersionsFixedIn, analyzeIssue.MinimumVersionsFixedIn) + } + objectInfo := ObjectInfo{ - ObjectName: issue.ObjectName, - SqlStatement: issue.SqlStatement, + ObjectName: analyzeIssue.ObjectName, + SqlStatement: analyzeIssue.SqlStatement, } - link = issue.DocsLink + link = analyzeIssue.DocsLink objects = append(objects, objectInfo) + + 
assessmentReport.AppendIssues(convertAnalyzeSchemaIssueToAssessmentIssue(analyzeIssue, description, minVersionsFixedIn)) } } - return UnsupportedFeature{featureName, objects, displayDDLInHTML, link, description} + + return UnsupportedFeature{featureName, objects, displayDDLInHTML, link, description, minVersionsFixedIn} +} + +// Q: do we no need of displayDDLInHTML in this approach? DDL can always be there for issues in the table if present. +func convertAnalyzeSchemaIssueToAssessmentIssue(analyzeSchemaIssue utils.AnalyzeSchemaIssue, issueDescription string, minVersionsFixedIn map[string]*ybversion.YBVersion) AssessmentIssue { + return AssessmentIssue{ + Category: analyzeSchemaIssue.IssueType, + CategoryDescription: GetCategoryDescription(analyzeSchemaIssue.IssueType), + Type: analyzeSchemaIssue.Type, + Name: analyzeSchemaIssue.Reason, // in convertIssueInstanceToAnalyzeIssue() we assign IssueType to Reason field + Description: issueDescription, // TODO: verify + Impact: analyzeSchemaIssue.Impact, + ObjectType: analyzeSchemaIssue.ObjectType, + ObjectName: analyzeSchemaIssue.ObjectName, + SqlStatement: analyzeSchemaIssue.SqlStatement, + DocsLink: analyzeSchemaIssue.DocsLink, + MinimumVersionFixedIn: minVersionsFixedIn, + } } func fetchUnsupportedPGFeaturesFromSchemaReport(schemaAnalysisReport utils.SchemaReport) ([]UnsupportedFeature, error) { log.Infof("fetching unsupported features for PG...") unsupportedFeatures := make([]UnsupportedFeature, 0) - for _, indexMethod := range unsupportedIndexMethods { + + for _, indexMethod := range queryissue.UnsupportedIndexMethods { displayIndexMethod := strings.ToUpper(indexMethod) - feature := fmt.Sprintf("%s indexes", displayIndexMethod) + featureName := fmt.Sprintf("%s indexes", displayIndexMethod) reason := fmt.Sprintf(INDEX_METHOD_ISSUE_REASON, displayIndexMethod) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(feature, reason, schemaAnalysisReport, false, "")) - } - 
unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(CONSTRAINT_TRIGGERS_FEATURE, CONSTRAINT_TRIGGER_ISSUE_REASON, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(INHERITED_TABLES_FEATURE, INHERITANCE_ISSUE_REASON, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(GENERATED_COLUMNS_FEATURE, STORED_GENERATED_COLUMN_ISSUE_REASON, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(CONVERSIONS_OBJECTS_FEATURE, CONVERSION_ISSUE_REASON, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(MULTI_COLUMN_GIN_INDEX_FEATURE, GIN_INDEX_MULTI_COLUMN_ISSUE_REASON, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(ALTER_SETTING_ATTRIBUTE_FEATURE, ALTER_TABLE_SET_ATTRIBUTE_ISSUE, schemaAnalysisReport, true, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(DISABLING_TABLE_RULE_FEATURE, ALTER_TABLE_DISABLE_RULE_ISSUE, schemaAnalysisReport, true, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(CLUSTER_ON_FEATURE, ALTER_TABLE_CLUSTER_ON_ISSUE, schemaAnalysisReport, true, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(STORAGE_PARAMETERS_FEATURE, STORAGE_PARAMETERS_DDL_STMT_ISSUE, schemaAnalysisReport, true, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(EXTENSION_FEATURE, UNSUPPORTED_EXTENSION_ISSUE, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, 
getUnsupportedFeaturesFromSchemaAnalysisReport(EXCLUSION_CONSTRAINT_FEATURE, EXCLUSION_CONSTRAINT_ISSUE, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(DEFERRABLE_CONSTRAINT_FEATURE, DEFERRABLE_CONSTRAINT_ISSUE, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(VIEW_CHECK_FEATURE, VIEW_CHECK_OPTION_ISSUE, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getIndexesOnComplexTypeUnsupportedFeature(schemaAnalysisReport, UnsupportedIndexDatatypes)) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(UNLOGGED_TABLE_FEATURE, ISSUE_UNLOGGED_TABLE, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(REFERENCING_TRIGGER_FEATURE, REFERENCING_CLAUSE_FOR_TRIGGERS, schemaAnalysisReport, false, "")) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(BEFORE_FOR_EACH_ROW_TRIGGERS_ON_PARTITIONED_TABLE_FEATURE, BEFORE_FOR_EACH_ROW_TRIGGERS_ON_PARTITIONED_TABLE, schemaAnalysisReport, false, "")) - - return unsupportedFeatures, nil + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(featureName, reason, "", schemaAnalysisReport, false, "")) + } + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(CONSTRAINT_TRIGGERS_FEATURE, "", queryissue.CONSTRAINT_TRIGGER, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(INHERITED_TABLES_FEATURE, "", queryissue.INHERITANCE, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(GENERATED_COLUMNS_FEATURE, "", 
queryissue.STORED_GENERATED_COLUMNS, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(CONVERSIONS_OBJECTS_FEATURE, CONVERSION_ISSUE_REASON, "", schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(MULTI_COLUMN_GIN_INDEX_FEATURE, "", queryissue.MULTI_COLUMN_GIN_INDEX, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(ALTER_SETTING_ATTRIBUTE_FEATURE, "", queryissue.ALTER_TABLE_SET_COLUMN_ATTRIBUTE, schemaAnalysisReport, true, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(DISABLING_TABLE_RULE_FEATURE, "", queryissue.ALTER_TABLE_DISABLE_RULE, schemaAnalysisReport, true, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(CLUSTER_ON_FEATURE, "", queryissue.ALTER_TABLE_CLUSTER_ON, schemaAnalysisReport, true, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(STORAGE_PARAMETERS_FEATURE, "", queryissue.STORAGE_PARAMETER, schemaAnalysisReport, true, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(EXTENSION_FEATURE, UNSUPPORTED_EXTENSION_ISSUE, "", schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(EXCLUSION_CONSTRAINT_FEATURE, "", queryissue.EXCLUSION_CONSTRAINTS, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(DEFERRABLE_CONSTRAINT_FEATURE, "", queryissue.DEFERRABLE_CONSTRAINTS, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(VIEW_CHECK_FEATURE, 
VIEW_CHECK_OPTION_ISSUE, "", schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getIndexesOnComplexTypeUnsupportedFeature(schemaAnalysisReport, queryissue.UnsupportedIndexDatatypes)) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(PK_UK_CONSTRAINT_ON_COMPLEX_DATATYPES_FEATURE, "", queryissue.PK_UK_ON_COMPLEX_DATATYPE, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(UNLOGGED_TABLE_FEATURE, "", queryissue.UNLOGGED_TABLE, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(REFERENCING_TRIGGER_FEATURE, "", queryissue.REFERENCING_CLAUSE_IN_TRIGGER, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(BEFORE_FOR_EACH_ROW_TRIGGERS_ON_PARTITIONED_TABLE_FEATURE, "", queryissue.BEFORE_ROW_TRIGGER_ON_PARTITIONED_TABLE, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.ADVISORY_LOCKS_NAME, "", queryissue.ADVISORY_LOCKS, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.XML_FUNCTIONS_NAME, "", queryissue.XML_FUNCTIONS, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.SYSTEM_COLUMNS_NAME, "", queryissue.SYSTEM_COLUMNS, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.LARGE_OBJECT_FUNCTIONS_NAME, "", queryissue.LARGE_OBJECT_FUNCTIONS, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, 
getUnsupportedFeaturesFromSchemaAnalysisReport(REGEX_FUNCTIONS_FEATURE, "", queryissue.REGEX_FUNCTIONS, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(FETCH_WITH_TIES_FEATURE, "", queryissue.FETCH_WITH_TIES, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.JSON_QUERY_FUNCTIONS_NAME, "", queryissue.JSON_QUERY_FUNCTION, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.JSON_CONSTRUCTOR_FUNCTION_NAME, "", queryissue.JSON_CONSTRUCTOR_FUNCTION, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.AGGREGATION_FUNCTIONS_NAME, "", queryissue.AGGREGATE_FUNCTION, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.SECURITY_INVOKER_VIEWS_NAME, "", queryissue.SECURITY_INVOKER_VIEWS, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.DETERMINISTIC_OPTION_WITH_COLLATION_NAME, "", queryissue.DETERMINISTIC_OPTION_WITH_COLLATION, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.UNIQUE_NULLS_NOT_DISTINCT_NAME, "", queryissue.UNIQUE_NULLS_NOT_DISTINCT, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.JSONB_SUBSCRIPTING_NAME, "", queryissue.JSONB_SUBSCRIPTING, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.FOREIGN_KEY_REFERENCES_PARTITIONED_TABLE_NAME, "", 
queryissue.FOREIGN_KEY_REFERENCES_PARTITIONED_TABLE, schemaAnalysisReport, false, "")) + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(queryissue.JSON_TYPE_PREDICATE_NAME, "", queryissue.JSON_TYPE_PREDICATE, schemaAnalysisReport, false, "")) + + return lo.Filter(unsupportedFeatures, func(f UnsupportedFeature, _ int) bool { + return len(f.Objects) > 0 + }), nil } -func getIndexesOnComplexTypeUnsupportedFeature(schemaAnalysisiReport utils.SchemaReport, unsupportedIndexDatatypes []string) UnsupportedFeature { +func getIndexesOnComplexTypeUnsupportedFeature(schemaAnalysisReport utils.SchemaReport, unsupportedIndexDatatypes []string) UnsupportedFeature { + // TODO: include MinimumVersionsFixedIn indexesOnComplexTypesFeature := UnsupportedFeature{ FeatureName: "Index on complex datatypes", DisplayDDL: false, @@ -916,7 +1087,7 @@ func getIndexesOnComplexTypeUnsupportedFeature(schemaAnalysisiReport utils.Schem unsupportedIndexDatatypes = append(unsupportedIndexDatatypes, "array") // adding it here only as we know issue form analyze will come with type unsupportedIndexDatatypes = append(unsupportedIndexDatatypes, "user_defined_type") // adding it here as we UDTs will come with this type. 
for _, unsupportedType := range unsupportedIndexDatatypes { - indexes := getUnsupportedFeaturesFromSchemaAnalysisReport(fmt.Sprintf("%s indexes", unsupportedType), fmt.Sprintf(ISSUE_INDEX_WITH_COMPLEX_DATATYPES, unsupportedType), schemaAnalysisReport, false, "") + indexes := getUnsupportedFeaturesFromSchemaAnalysisReport(fmt.Sprintf("%s indexes", unsupportedType), fmt.Sprintf(ISSUE_INDEX_WITH_COMPLEX_DATATYPES, unsupportedType), "", schemaAnalysisReport, false, "") for _, object := range indexes.Objects { formattedObject := object formattedObject.ObjectName = fmt.Sprintf("%s: %s", strings.ToUpper(unsupportedType), object.ObjectName) @@ -926,15 +1097,16 @@ func getIndexesOnComplexTypeUnsupportedFeature(schemaAnalysisiReport utils.Schem } } } - return indexesOnComplexTypesFeature } func fetchUnsupportedOracleFeaturesFromSchemaReport(schemaAnalysisReport utils.SchemaReport) ([]UnsupportedFeature, error) { log.Infof("fetching unsupported features for Oracle...") unsupportedFeatures := make([]UnsupportedFeature, 0) - unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(COMPOUND_TRIGGER_FEATURE, COMPOUND_TRIGGER_ISSUE_REASON, schemaAnalysisReport, false, "")) - return unsupportedFeatures, nil + unsupportedFeatures = append(unsupportedFeatures, getUnsupportedFeaturesFromSchemaAnalysisReport(COMPOUND_TRIGGER_FEATURE, COMPOUND_TRIGGER_ISSUE_REASON, "", schemaAnalysisReport, false, "")) + return lo.Filter(unsupportedFeatures, func(f UnsupportedFeature, _ int) bool { + return len(f.Objects) > 0 + }), nil } var OracleUnsupportedIndexTypes = []string{"CLUSTER INDEX", "DOMAIN INDEX", "FUNCTION-BASED DOMAIN INDEX", "IOT - TOP INDEX", "NORMAL/REV INDEX", "FUNCTION-BASED NORMAL/REV INDEX"} @@ -968,29 +1140,117 @@ func fetchUnsupportedObjectTypes() ([]UnsupportedFeature, error) { unsupportedIndexes = append(unsupportedIndexes, ObjectInfo{ ObjectName: fmt.Sprintf("Index Name: %s, Index Type=%s", objectName, objectType), }) + + 
assessmentReport.AppendIssues(AssessmentIssue{ + Category: UNSUPPORTED_FEATURES_CATEGORY, + Type: "", // TODO + Name: UNSUPPORTED_INDEXES_FEATURE, + ObjectType: "INDEX", + ObjectName: fmt.Sprintf("Index Name: %s, Index Type=%s", objectName, objectType), + }) } else if objectType == VIRTUAL_COLUMN { virtualColumns = append(virtualColumns, ObjectInfo{ObjectName: objectName}) + assessmentReport.AppendIssues(AssessmentIssue{ + Category: UNSUPPORTED_FEATURES_CATEGORY, + Type: "", // TODO + Name: VIRTUAL_COLUMNS_FEATURE, + ObjectName: objectName, + }) } else if objectType == INHERITED_TYPE { inheritedTypes = append(inheritedTypes, ObjectInfo{ObjectName: objectName}) + assessmentReport.AppendIssues(AssessmentIssue{ + Category: UNSUPPORTED_FEATURES_CATEGORY, + Type: "", // TODO + Name: INHERITED_TYPES_FEATURE, + ObjectName: objectName, + }) } else if objectType == REFERENCE_PARTITION || objectType == SYSTEM_PARTITION { referenceOrTablePartitionPresent = true unsupportedPartitionTypes = append(unsupportedPartitionTypes, ObjectInfo{ObjectName: fmt.Sprintf("Table Name: %s, Partition Method: %s", objectName, objectType)}) + + // For oracle migration complexity comes from ora2pg, so defining Impact not required right now + assessmentReport.AppendIssues(AssessmentIssue{ + Category: UNSUPPORTED_FEATURES_CATEGORY, + Type: "", // TODO + Name: UNSUPPORTED_PARTITIONING_METHODS_FEATURE, + ObjectType: "TABLE", + ObjectName: fmt.Sprintf("Table Name: %s, Partition Method: %s", objectName, objectType), + }) } } unsupportedFeatures := make([]UnsupportedFeature, 0) - unsupportedFeatures = append(unsupportedFeatures, UnsupportedFeature{UNSUPPORTED_INDEXES_FEATURE, unsupportedIndexes, false, "", ""}) - unsupportedFeatures = append(unsupportedFeatures, UnsupportedFeature{VIRTUAL_COLUMNS_FEATURE, virtualColumns, false, "", ""}) - unsupportedFeatures = append(unsupportedFeatures, UnsupportedFeature{INHERITED_TYPES_FEATURE, inheritedTypes, false, "", ""}) - unsupportedFeatures = 
append(unsupportedFeatures, UnsupportedFeature{UNSUPPORTED_PARTITIONING_METHODS_FEATURE, unsupportedPartitionTypes, false, "", ""}) - return unsupportedFeatures, nil + unsupportedFeatures = append(unsupportedFeatures, UnsupportedFeature{UNSUPPORTED_INDEXES_FEATURE, unsupportedIndexes, false, "", "", nil}) + unsupportedFeatures = append(unsupportedFeatures, UnsupportedFeature{VIRTUAL_COLUMNS_FEATURE, virtualColumns, false, "", "", nil}) + unsupportedFeatures = append(unsupportedFeatures, UnsupportedFeature{INHERITED_TYPES_FEATURE, inheritedTypes, false, "", "", nil}) + unsupportedFeatures = append(unsupportedFeatures, UnsupportedFeature{UNSUPPORTED_PARTITIONING_METHODS_FEATURE, unsupportedPartitionTypes, false, "", "", nil}) + return lo.Filter(unsupportedFeatures, func(f UnsupportedFeature, _ int) bool { + return len(f.Objects) > 0 + }), nil +} + +func fetchUnsupportedPlPgSQLObjects(schemaAnalysisReport utils.SchemaReport) []UnsupportedFeature { + if source.DBType != POSTGRESQL { + return nil + } + + plpgsqlIssues := lo.Filter(schemaAnalysisReport.Issues, func(issue utils.AnalyzeSchemaIssue, _ int) bool { + return issue.IssueType == UNSUPPORTED_PLPGSQL_OBJECTS_CATEGORY + }) + groupPlpgsqlIssuesByReason := lo.GroupBy(plpgsqlIssues, func(issue utils.AnalyzeSchemaIssue) string { + return issue.Reason + }) + var unsupportedPlpgSqlObjects []UnsupportedFeature + for reason, issues := range groupPlpgsqlIssuesByReason { + var objects []ObjectInfo + var docsLink string + var minVersionsFixedIn map[string]*ybversion.YBVersion + var minVersionsFixedInSet bool + + for _, issue := range issues { + if !minVersionsFixedInSet { + minVersionsFixedIn = issue.MinimumVersionsFixedIn + minVersionsFixedInSet = true + } + if !areMinVersionsFixedInEqual(minVersionsFixedIn, issue.MinimumVersionsFixedIn) { + utils.ErrExit("Issues belonging to UnsupportedFeature %s have different minimum versions fixed in: %v, %v", reason, minVersionsFixedIn, issue.MinimumVersionsFixedIn) + } + + objects = 
append(objects, ObjectInfo{ + ObjectType: issue.ObjectType, + ObjectName: issue.ObjectName, + SqlStatement: issue.SqlStatement, + }) + docsLink = issue.DocsLink + + assessmentReport.AppendIssues(AssessmentIssue{ + Category: UNSUPPORTED_PLPGSQL_OBJECTS_CATEGORY, + Type: issue.Type, + Name: reason, + Impact: issue.Impact, // TODO: verify(expected already there since underlying issues are assigned) + ObjectType: issue.ObjectType, + ObjectName: issue.ObjectName, + SqlStatement: issue.SqlStatement, + DocsLink: issue.DocsLink, + MinimumVersionFixedIn: issue.MinimumVersionsFixedIn, + }) + } + feature := UnsupportedFeature{ + FeatureName: reason, + DisplayDDL: true, + DocsLink: docsLink, + Objects: objects, + } + unsupportedPlpgSqlObjects = append(unsupportedPlpgSqlObjects, feature) + } + + return unsupportedPlpgSqlObjects } func fetchUnsupportedQueryConstructs() ([]utils.UnsupportedQueryConstruct, error) { if source.DBType != POSTGRESQL { return nil, nil } - parserIssueDetector := queryissue.NewParserIssueDetector() query := fmt.Sprintf("SELECT DISTINCT query from %s", migassessment.DB_QUERIES_SUMMARY) rows, err := assessmentDB.Query(query) if err != nil { @@ -1022,8 +1282,20 @@ func fetchUnsupportedQueryConstructs() ([]utils.UnsupportedQueryConstruct, error for i := 0; i < len(executedQueries); i++ { query := executedQueries[i] log.Debugf("fetching unsupported query constructs for query - [%s]", query) + collectedSchemaList, err := queryparser.GetSchemaUsed(query) + if err != nil { // no need to error out if failed to get schemas for a query + log.Errorf("failed to get schemas used for query [%s]: %v", query, err) + continue + } - issues, err := parserIssueDetector.GetIssues(query) + log.Infof("collected schema list %v(len=%d) for query [%s]", collectedSchemaList, len(collectedSchemaList), query) + if !considerQueryForIssueDetection(collectedSchemaList) { + log.Infof("ignoring query due to difference in collected schema list %v(len=%d) vs source schema list %v(len=%d)", 
+ collectedSchemaList, len(collectedSchemaList), source.GetSchemaList(), len(source.GetSchemaList())) + continue + } + + issues, err := parserIssueDetector.GetDMLIssues(query, targetDbVersion) if err != nil { log.Errorf("failed while trying to fetch query issues in query - [%s]: %v", query, err) @@ -1031,13 +1303,23 @@ func fetchUnsupportedQueryConstructs() ([]utils.UnsupportedQueryConstruct, error for _, issue := range issues { uqc := utils.UnsupportedQueryConstruct{ - Query: issue.SqlStatement, - ConstructTypeName: issue.TypeName, - DocsLink: issue.DocsLink, + Query: issue.SqlStatement, + ConstructTypeName: issue.Name, + DocsLink: issue.DocsLink, + MinimumVersionsFixedIn: issue.MinimumVersionsFixedIn, } result = append(result, uqc) - } + assessmentReport.AppendIssues(AssessmentIssue{ + Category: UNSUPPORTED_QUERY_CONSTRUCTS_CATEGORY, + Type: issue.Type, + Name: issue.Name, + Impact: issue.Impact, + SqlStatement: issue.SqlStatement, + DocsLink: issue.DocsLink, + MinimumVersionFixedIn: issue.MinimumVersionsFixedIn, + }) + } } // sort the slice to group same constructType in html and json reports @@ -1101,9 +1383,9 @@ func fetchColumnsWithUnsupportedDataTypes() ([]utils.TableColumnsDataTypes, []ut isUnsupportedDatatypeInLive := utils.ContainsAnyStringFromSlice(liveUnsupportedDatatypes, typeName) isUnsupportedDatatypeInLiveWithFFOrFBList := utils.ContainsAnyStringFromSlice(liveWithFForFBUnsupportedDatatypes, typeName) - isUDTDatatype := utils.ContainsAnyStringFromSlice(compositeTypes, allColumnsDataTypes[i].DataType) - isArrayDatatype := strings.HasSuffix(allColumnsDataTypes[i].DataType, "[]") //if type is array - isEnumDatatype := utils.ContainsAnyStringFromSlice(enumTypes, strings.TrimSuffix(allColumnsDataTypes[i].DataType, "[]")) //is ENUM type + isUDTDatatype := utils.ContainsAnyStringFromSlice(parserIssueDetector.GetCompositeTypes(), allColumnsDataTypes[i].DataType) + isArrayDatatype := strings.HasSuffix(allColumnsDataTypes[i].DataType, "[]") //if type is array 
+ isEnumDatatype := utils.ContainsAnyStringFromSlice(parserIssueDetector.GetEnumTypes(), strings.TrimSuffix(allColumnsDataTypes[i].DataType, "[]")) //is ENUM type isArrayOfEnumsDatatype := isArrayDatatype && isEnumDatatype isUnsupportedDatatypeInLiveWithFFOrFB := isUnsupportedDatatypeInLiveWithFFOrFBList || isUDTDatatype || isArrayOfEnumsDatatype @@ -1129,13 +1411,76 @@ func fetchColumnsWithUnsupportedDataTypes() ([]utils.TableColumnsDataTypes, []ut return unsupportedDataTypes, unsupportedDataTypesForLiveMigration, unsupportedDataTypesForLiveMigrationWithFForFB, nil } +func getAssessmentIssuesForUnsupportedDatatypes(unsupportedDatatypes []utils.TableColumnsDataTypes) []AssessmentIssue { + var assessmentIssues []AssessmentIssue + for _, colInfo := range unsupportedDatatypes { + qualifiedColName := fmt.Sprintf("%s.%s.%s", colInfo.SchemaName, colInfo.TableName, colInfo.ColumnName) + issue := AssessmentIssue{ + Category: UNSUPPORTED_DATATYPES_CATEGORY, + CategoryDescription: GetCategoryDescription(UNSUPPORTED_DATATYPES_CATEGORY), + Type: colInfo.DataType, // TODO: maybe name it like "unsupported datatype - geometry" + Name: colInfo.DataType, // TODO: maybe name it like "unsupported datatype - geometry" + Impact: constants.IMPACT_LEVEL_3, + ObjectType: constants.COLUMN, + ObjectName: qualifiedColName, + DocsLink: "", // TODO + MinimumVersionFixedIn: nil, // TODO + } + assessmentIssues = append(assessmentIssues, issue) + } + + return assessmentIssues +} + +/* +Queries to ignore: +- Collected schemas is totally different than source schema list, not containing "" + +Queries to consider: +- Collected schemas subset of source schema list +- Collected schemas contains some from source schema list and some extras + +Caveats: +There can be a lot of false positives. +For example: standard sql functions like sum(), count() won't be qualified(pg_catalog) in queries generally. 
+Making the schema unknown for that object, resulting in the query being considered +*/ +func considerQueryForIssueDetection(collectedSchemaList []string) bool { + // filtering out pg_catalog schema, since it doesn't impact query consideration decision + collectedSchemaList = lo.Filter(collectedSchemaList, func(item string, _ int) bool { + return item != "pg_catalog" + }) + + sourceSchemaList := strings.Split(source.Schema, "|") + + // fallback in case: unable to collect objects or there are no object(s) in the query + if len(collectedSchemaList) == 0 { + return true + } + + // empty schemaname indicates presence of unqualified objectnames in query + if slices.Contains(collectedSchemaList, "") { + log.Debug("considering due to empty schema\n") + return true + } + + for _, collectedSchema := range collectedSchemaList { + if slices.Contains(sourceSchemaList, collectedSchema) { + log.Debugf("considering due to '%s' schema\n", collectedSchema) + return true + } + } + return false +} + const ( ORACLE_PARTITION_DEFAULT_COLOCATION = `For sharding/colocation recommendations, each partition is treated individually. During the export schema phase, all the partitions of a partitioned table are currently created as colocated by default.
To manually modify the schema, please refer: https://github.com/yugabyte/yb-voyager/issues/1581.` ORACLE_UNSUPPPORTED_PARTITIONING = `Reference and System Partitioned tables are created as normal tables, but are not considered for target cluster sizing recommendations.` - GIN_INDEXES = `There are some BITMAP indexes present in the schema that will get converted to GIN indexes, but GIN indexes are partially supported in YugabyteDB as mentioned in https://github.com/yugabyte/yugabyte-db/issues/7850 so take a look and modify them if not supported.` + GIN_INDEXES = `There are some BITMAP indexes present in the schema that will get converted to GIN indexes, but GIN indexes are partially supported in YugabyteDB as mentioned in https://github.com/yugabyte/yugabyte-db/issues/7850 so take a look and modify them if not supported.` + UNLOGGED_TABLE_NOTE = `There are some Unlogged tables in the schema. They will be created as regular LOGGED tables in YugabyteDB as unlogged tables are not supported.` ) const FOREIGN_TABLE_NOTE = `There are some Foreign tables in the schema, but during the export schema phase, exported schema does not include the SERVER and USER MAPPING objects. Therefore, you must manually create these objects before import schema. For more information on each of them, run analyze-schema. 
` @@ -1162,7 +1507,12 @@ func addNotesToAssessmentReport() { } } } + case POSTGRESQL: + if parserIssueDetector.IsUnloggedTablesIssueFiltered { + assessmentReport.Notes = append(assessmentReport.Notes, UNLOGGED_TABLE_NOTE) + } } + } func addMigrationCaveatsToAssessmentReport(unsupportedDataTypesForLiveMigration []utils.TableColumnsDataTypes, unsupportedDataTypesForLiveMigrationWithFForFB []utils.TableColumnsDataTypes) { @@ -1170,28 +1520,62 @@ func addMigrationCaveatsToAssessmentReport(unsupportedDataTypesForLiveMigration case POSTGRESQL: log.Infof("add migration caveats to assessment report") migrationCaveats := make([]UnsupportedFeature, 0) - migrationCaveats = append(migrationCaveats, getUnsupportedFeaturesFromSchemaAnalysisReport(ALTER_PARTITION_ADD_PK_CAVEAT_FEATURE, ADDING_PK_TO_PARTITIONED_TABLE_ISSUE_REASON, + migrationCaveats = append(migrationCaveats, getUnsupportedFeaturesFromSchemaAnalysisReport(ALTER_PARTITION_ADD_PK_CAVEAT_FEATURE, "", queryissue.ALTER_TABLE_ADD_PK_ON_PARTITIONED_TABLE, schemaAnalysisReport, true, DESCRIPTION_ADD_PK_TO_PARTITION_TABLE)) - migrationCaveats = append(migrationCaveats, getUnsupportedFeaturesFromSchemaAnalysisReport(FOREIGN_TABLE_CAVEAT_FEATURE, FOREIGN_TABLE_ISSUE_REASON, + migrationCaveats = append(migrationCaveats, getUnsupportedFeaturesFromSchemaAnalysisReport(FOREIGN_TABLE_CAVEAT_FEATURE, "", queryissue.FOREIGN_TABLE, schemaAnalysisReport, false, DESCRIPTION_FOREIGN_TABLES)) - migrationCaveats = append(migrationCaveats, getUnsupportedFeaturesFromSchemaAnalysisReport(POLICIES_CAVEAT_FEATURE, POLICY_ROLE_ISSUE, - schemaAnalysisReport, false, DESCRIPTION_POLICY_ROLE_ISSUE)) + migrationCaveats = append(migrationCaveats, getUnsupportedFeaturesFromSchemaAnalysisReport(POLICIES_CAVEAT_FEATURE, "", queryissue.POLICY_WITH_ROLES, + schemaAnalysisReport, false, DESCRIPTION_POLICY_ROLE_DESCRIPTION)) if len(unsupportedDataTypesForLiveMigration) > 0 { columns := make([]ObjectInfo, 0) - for _, col := range 
unsupportedDataTypesForLiveMigration { - columns = append(columns, ObjectInfo{ObjectName: fmt.Sprintf("%s.%s.%s (%s)", col.SchemaName, col.TableName, col.ColumnName, col.DataType)}) + for _, colInfo := range unsupportedDataTypesForLiveMigration { + columns = append(columns, ObjectInfo{ObjectName: fmt.Sprintf("%s.%s.%s (%s)", colInfo.SchemaName, colInfo.TableName, colInfo.ColumnName, colInfo.DataType)}) + + assessmentReport.AppendIssues(AssessmentIssue{ + Category: MIGRATION_CAVEATS_CATEGORY, + CategoryDescription: "", // TODO + Type: UNSUPPORTED_DATATYPES_LIVE_CAVEAT_FEATURE, // TODO add object type in type name + Name: "", // TODO + Impact: constants.IMPACT_LEVEL_1, // Caveat - we don't know the migration is offline/online; + Description: UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_DESCRIPTION, + ObjectType: constants.COLUMN, + ObjectName: fmt.Sprintf("%s.%s.%s", colInfo.SchemaName, colInfo.TableName, colInfo.ColumnName), + DocsLink: UNSUPPORTED_DATATYPE_LIVE_MIGRATION_DOC_LINK, + }) + } + if len(columns) > 0 { + migrationCaveats = append(migrationCaveats, UnsupportedFeature{UNSUPPORTED_DATATYPES_LIVE_CAVEAT_FEATURE, columns, false, UNSUPPORTED_DATATYPE_LIVE_MIGRATION_DOC_LINK, UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_DESCRIPTION, nil}) } - migrationCaveats = append(migrationCaveats, UnsupportedFeature{UNSUPPORTED_DATATYPES_LIVE_CAVEAT_FEATURE, columns, false, UNSUPPORTED_DATATYPE_LIVE_MIGRATION_DOC_LINK, UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_ISSUE}) } if len(unsupportedDataTypesForLiveMigrationWithFForFB) > 0 { columns := make([]ObjectInfo, 0) - for _, col := range unsupportedDataTypesForLiveMigrationWithFForFB { - columns = append(columns, ObjectInfo{ObjectName: fmt.Sprintf("%s.%s.%s (%s)", col.SchemaName, col.TableName, col.ColumnName, col.DataType)}) + for _, colInfo := range unsupportedDataTypesForLiveMigrationWithFForFB { + columns = append(columns, ObjectInfo{ObjectName: fmt.Sprintf("%s.%s.%s (%s)", colInfo.SchemaName, colInfo.TableName, colInfo.ColumnName, 
colInfo.DataType)}) + + assessmentReport.AppendIssues(AssessmentIssue{ + Category: MIGRATION_CAVEATS_CATEGORY, + CategoryDescription: "", // TODO + Type: UNSUPPORTED_DATATYPES_LIVE_WITH_FF_FB_CAVEAT_FEATURE, // TODO add object type in type name + Name: "", // TODO + Impact: constants.IMPACT_LEVEL_1, + Description: UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_WITH_FF_FB_DESCRIPTION, + ObjectType: constants.COLUMN, + ObjectName: fmt.Sprintf("%s.%s.%s", colInfo.SchemaName, colInfo.TableName, colInfo.ColumnName), + DocsLink: UNSUPPORTED_DATATYPE_LIVE_MIGRATION_DOC_LINK, + }) } - migrationCaveats = append(migrationCaveats, UnsupportedFeature{UNSUPPORTED_DATATYPES_LIVE_WITH_FF_FB_CAVEAT_FEATURE, columns, false, UNSUPPORTED_DATATYPE_LIVE_MIGRATION_DOC_LINK, UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_WITH_FF_FB_ISSUE}) + if len(columns) > 0 { + migrationCaveats = append(migrationCaveats, UnsupportedFeature{UNSUPPORTED_DATATYPES_LIVE_WITH_FF_FB_CAVEAT_FEATURE, columns, false, UNSUPPORTED_DATATYPE_LIVE_MIGRATION_DOC_LINK, UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_WITH_FF_FB_DESCRIPTION, nil}) + } + } + migrationCaveats = lo.Filter(migrationCaveats, func(m UnsupportedFeature, _ int) bool { + return len(m.Objects) > 0 + }) + if len(migrationCaveats) > 0 { + assessmentReport.MigrationCaveats = migrationCaveats } - assessmentReport.MigrationCaveats = migrationCaveats + } } @@ -1218,6 +1602,14 @@ func postProcessingOfAssessmentReport() { func generateAssessmentReportJson(reportDir string) error { jsonReportFilePath := filepath.Join(reportDir, fmt.Sprintf("%s%s", ASSESSMENT_FILE_NAME, JSON_EXTENSION)) log.Infof("writing assessment report to file: %s", jsonReportFilePath) + + var err error + assessmentReport.MigrationComplexityExplanation, err = buildMigrationComplexityExplanation(source.DBType, assessmentReport, "") + if err != nil { + return fmt.Errorf("unable to build migration complexity explanation for json report: %w", err) + } + 
log.Info(assessmentReport.MigrationComplexityExplanation) + strReport, err := json.MarshalIndent(assessmentReport, "", "\t") if err != nil { return fmt.Errorf("failed to marshal the assessment report: %w", err) @@ -1236,6 +1628,12 @@ func generateAssessmentReportHtml(reportDir string) error { htmlReportFilePath := filepath.Join(reportDir, fmt.Sprintf("%s%s", ASSESSMENT_FILE_NAME, HTML_EXTENSION)) log.Infof("writing assessment report to file: %s", htmlReportFilePath) + var err error + assessmentReport.MigrationComplexityExplanation, err = buildMigrationComplexityExplanation(source.DBType, assessmentReport, "html") + if err != nil { + return fmt.Errorf("unable to build migration complexity explanation for html report: %w", err) + } + file, err := os.Create(htmlReportFilePath) if err != nil { return fmt.Errorf("failed to create file for %q: %w", filepath.Base(htmlReportFilePath), err) @@ -1249,7 +1647,12 @@ func generateAssessmentReportHtml(reportDir string) error { log.Infof("creating template for assessment report...") funcMap := template.FuncMap{ - "split": split, + "split": split, + "groupByObjectType": groupByObjectType, + "numKeysInMapStringObjectInfo": numKeysInMapStringObjectInfo, + "groupByObjectName": groupByObjectName, + "totalUniqueObjectNamesOfAllTypes": totalUniqueObjectNamesOfAllTypes, + "getSupportedVersionString": getSupportedVersionString, } tmpl := template.Must(template.New("report").Funcs(funcMap).Parse(string(bytesTemplate))) @@ -1267,10 +1670,48 @@ func generateAssessmentReportHtml(reportDir string) error { return nil } +func groupByObjectType(objects []ObjectInfo) map[string][]ObjectInfo { + return lo.GroupBy(objects, func(object ObjectInfo) string { + return object.ObjectType + }) +} + +func groupByObjectName(objects []ObjectInfo) map[string][]ObjectInfo { + return lo.GroupBy(objects, func(object ObjectInfo) string { + return object.ObjectName + }) +} + +func totalUniqueObjectNamesOfAllTypes(m map[string][]ObjectInfo) int { + totalObjectNames 
:= 0 + for _, objects := range m { + totalObjectNames += len(lo.Keys(groupByObjectName(objects))) + } + return totalObjectNames +} + +func numKeysInMapStringObjectInfo(m map[string][]ObjectInfo) int { + return len(lo.Keys(m)) +} + func split(value string, delimiter string) []string { return strings.Split(value, delimiter) } +func getSupportedVersionString(minimumVersionsFixedIn map[string]*ybversion.YBVersion) string { + if minimumVersionsFixedIn == nil { + return "" + } + supportedVersions := []string{} + for series, minVersionFixedIn := range minimumVersionsFixedIn { + if minVersionFixedIn == nil { + continue + } + supportedVersions = append(supportedVersions, fmt.Sprintf(">=%s (%s series)", minVersionFixedIn.String(), series)) + } + return strings.Join(supportedVersions, ", ") +} + func validateSourceDBTypeForAssessMigration() { if source.DBType == "" { utils.ErrExit("Error: required flag \"source-db-type\" not set") @@ -1286,9 +1727,32 @@ func validateSourceDBTypeForAssessMigration() { func validateAssessmentMetadataDirFlag() { if assessmentMetadataDirFlag != "" { if !utils.FileOrFolderExists(assessmentMetadataDirFlag) { - utils.ErrExit("assessment metadata directory %q provided with `--assessment-metadata-dir` flag does not exist", assessmentMetadataDirFlag) + utils.ErrExit("assessment metadata directory: %q provided with `--assessment-metadata-dir` flag does not exist", assessmentMetadataDirFlag) } else { log.Infof("using provided assessment metadata directory: %s", assessmentMetadataDirFlag) } } } + +func validateAndSetTargetDbVersionFlag() error { + if targetDbVersionStrFlag == "" { + targetDbVersion = ybversion.LatestStable + return nil + } + var err error + targetDbVersion, err = ybversion.NewYBVersion(targetDbVersionStrFlag) + + if err == nil || !errors.Is(err, ybversion.ErrUnsupportedSeries) { + return err + } + + // error is ErrUnsupportedSeries + utils.PrintAndLog("%v", err) + if utils.AskPrompt("Do you want to continue with the latest stable 
YugabyteDB version:", ybversion.LatestStable.String()) { + targetDbVersion = ybversion.LatestStable + return nil + } else { + utils.ErrExit("Aborting..") + return nil + } +} diff --git a/yb-voyager/cmd/common.go b/yb-voyager/cmd/common.go index 3bce78ffd0..4c9f34138c 100644 --- a/yb-voyager/cmd/common.go +++ b/yb-voyager/cmd/common.go @@ -16,7 +16,6 @@ limitations under the License. package cmd import ( - "encoding/csv" "encoding/json" "errors" "fmt" @@ -46,6 +45,7 @@ import ( "golang.org/x/exp/slices" "golang.org/x/term" + "github.com/hashicorp/go-version" "github.com/yugabyte/yb-voyager/yb-voyager/src/callhome" "github.com/yugabyte/yb-voyager/yb-voyager/src/cp" "github.com/yugabyte/yb-voyager/yb-voyager/src/datafile" @@ -58,12 +58,15 @@ import ( "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/jsonfile" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" ) var ( - metaDB *metadb.MetaDB - PARENT_COMMAND_USAGE = "Parent command. Refer to the sub-commands for usage help." - startTime time.Time + metaDB *metadb.MetaDB + PARENT_COMMAND_USAGE = "Parent command. Refer to the sub-commands for usage help." 
+ startTime time.Time + targetDbVersionStrFlag string + targetDbVersion *ybversion.YBVersion ) func PrintElapsedDuration() { @@ -162,7 +165,7 @@ func getMappingForTableNameVsTableFileName(dataDirPath string, noWait bool) map[ fullTableName := fmt.Sprintf("%s.%s", schemaName, tableName) table, err := namereg.NameReg.LookupTableName(fullTableName) if err != nil { - utils.ErrExit("lookup table %s in name registry : %v", fullTableName, err) + utils.ErrExit("lookup table in name registry: %q: %v", fullTableName, err) } tableNameVsFileNameMap[table.ForKey()] = fileName } @@ -170,7 +173,7 @@ func getMappingForTableNameVsTableFileName(dataDirPath string, noWait bool) map[ tocTextFileDataBytes, err := os.ReadFile(tocTextFilePath) if err != nil { - utils.ErrExit("Failed to read file %q: %v", tocTextFilePath, err) + utils.ErrExit("Failed to read file: %q: %v", tocTextFilePath, err) } tocTextFileData := strings.Split(string(tocTextFileDataBytes), "\n") @@ -205,7 +208,7 @@ func GetTableRowCount(filePath string) map[string]int64 { fileBytes, err := os.ReadFile(filePath) if err != nil { - utils.ErrExit("read file %q: %s", filePath, err) + utils.ErrExit("read file: %q: %s", filePath, err) } lines := strings.Split(strings.Trim(string(fileBytes), "\n"), "\n") @@ -236,11 +239,13 @@ func getLeafPartitionsFromRootTable() map[string][]string { if err != nil { utils.ErrExit("get migration status record: %v", err) } - if !msr.IsExportTableListSet || msr.SourceDBConf.DBType != POSTGRESQL { + if msr.SourceDBConf.DBType != POSTGRESQL { return leafPartitions } tables := msr.TableListExportedFromSource for leaf, root := range msr.SourceRenameTablesMap { + //Using the SQLName here to avoid creating the NameTuples manually for leafTable case as in a case partition names changes on target + //NameRegistry won't be able to figure out the map of source->target tuples. 
leafTable := sqlname.NewSourceNameFromQualifiedName(getQuotedFromUnquoted(leaf)) rootTable := sqlname.NewSourceNameFromQualifiedName(getQuotedFromUnquoted(root)) leaf = leafTable.Qualified.MinQuoted @@ -251,6 +256,7 @@ func getLeafPartitionsFromRootTable() map[string][]string { if !lo.Contains(tables, root) { continue } + //Adding a Qualified.MinQuoted to key and values which is similar to NameTuple.ForOutput(); leafPartitions[root] = append(leafPartitions[root], leaf) } @@ -268,6 +274,10 @@ func displayExportedRowCountSnapshot(snapshotViaDebezium bool) { fmt.Printf("snapshot export report\n") uitable := uitable.New() + msr, err := metaDB.GetMigrationStatusRecord() + if err != nil { + utils.ErrExit("error getting migration status record: %v", err) + } leafPartitions := getLeafPartitionsFromRootTable() if !snapshotViaDebezium { exportedRowCount := getExportedRowCountSnapshot(exportDir) @@ -284,11 +294,12 @@ func displayExportedRowCountSnapshot(snapshotViaDebezium bool) { for _, key := range keys { table, err := namereg.NameReg.LookupTableName(key) if err != nil { - utils.ErrExit("lookup table %s in name registry : %v", key, err) + utils.ErrExit("lookup table in name registry: %q: %v", key, err) } displayTableName := table.CurrentName.Unqualified.MinQuoted + //Using the ForOutput() as a key for leafPartitions map as we are populating the map in that way. 
partitions := leafPartitions[table.ForOutput()] - if source.DBType == POSTGRESQL && partitions != nil { + if source.DBType == POSTGRESQL && partitions != nil && msr.IsExportTableListSet { partitions := strings.Join(partitions, ", ") displayTableName = fmt.Sprintf("%s (%s)", table.CurrentName.Unqualified.MinQuoted, partitions) } @@ -317,7 +328,7 @@ func displayExportedRowCountSnapshot(snapshotViaDebezium bool) { } table, err := namereg.NameReg.LookupTableName(fmt.Sprintf("%s.%s", tableStatus.SchemaName, tableStatus.TableName)) if err != nil { - utils.ErrExit("lookup table %s in name registry : %v", tableStatus.TableName, err) + utils.ErrExit("lookup table in name registry : %q: %v", tableStatus.TableName, err) } displayTableName := table.CurrentName.Unqualified.MinQuoted partitions := leafPartitions[table.ForOutput()] @@ -353,21 +364,6 @@ func renameDatafileDescriptor(exportDir string) { datafileDescriptor.Save() } -func renameExportSnapshotStatus(exportSnapshotStatusFile *jsonfile.JsonFile[ExportSnapshotStatus]) error { - err := exportSnapshotStatusFile.Update(func(exportSnapshotStatus *ExportSnapshotStatus) { - for i, tableStatus := range exportSnapshotStatus.Tables { - renamedTable, isRenamed := renameTableIfRequired(tableStatus.TableName) - if isRenamed { - exportSnapshotStatus.Tables[i].TableName = renamedTable - } - } - }) - if err != nil { - return fmt.Errorf("update export snapshot status: %w", err) - } - return nil -} - func displayImportedRowCountSnapshot(state *ImportDataState, tasks []*ImportFileTask) { if importerRole == IMPORT_FILE_ROLE { fmt.Printf("import report\n") @@ -392,7 +388,7 @@ func displayImportedRowCountSnapshot(state *ImportDataState, tasks []*ImportFile for _, tableName := range tableList { tableRowCount, err := state.GetImportedSnapshotRowCountForTable(tableName) if err != nil { - utils.ErrExit("could not fetch snapshot row count for table %q: %w", tableName, err) + utils.ErrExit("could not fetch snapshot row count for table: %q: %w", 
tableName, err) } snapshotRowCount.Put(tableName, tableRowCount) } @@ -445,7 +441,7 @@ func CreateMigrationProjectIfNotExists(dbType string, exportDir string) { for _, subdir := range projectSubdirs { err := exec.Command("mkdir", "-p", filepath.Join(projectDirPath, subdir)).Run() if err != nil { - utils.ErrExit("couldn't create sub-directories under %q: %v", projectDirPath, err) + utils.ErrExit("couldn't create sub-directories under: %q: %v", projectDirPath, err) } } @@ -462,7 +458,7 @@ func CreateMigrationProjectIfNotExists(dbType string, exportDir string) { err := exec.Command("mkdir", "-p", filepath.Join(schemaDir, databaseObjectDirName)).Run() if err != nil { - utils.ErrExit("couldn't create sub-directories under %q: %v", schemaDir, err) + utils.ErrExit("couldn't create sub-directories under: %q: %v", schemaDir, err) } } @@ -482,22 +478,78 @@ func initMetaDB(migrationExportDir string) *metadb.MetaDB { if err != nil { utils.ErrExit("could not init migration status record: %w", err) } - msr, err := metaDBInstance.GetMigrationStatusRecord() + + return metaDBInstance +} + +func detectVersionCompatibility(msrVoyagerVersionString string, migrationExportDir string) { + // If the msr VoyagerVersion is less than the PREVIOUS_BREAKING_CHANGE_VERSION, then the export-dir is not compatible with the current Voyager version. + // This version will always be a final release version and never "main" or "rc" version. + previousBreakingChangeVersion, err := version.NewVersion(utils.PREVIOUS_BREAKING_CHANGE_VERSION) if err != nil { - utils.ErrExit("get migration status record: %v", err) + utils.ErrExit("could not create version from %q: %v", utils.PREVIOUS_BREAKING_CHANGE_VERSION, err) + } + + var versionCheckFailed bool + + if msrVoyagerVersionString == "main" { + // If the export-dir was created using the main branch, then the current version should also be the main branch. 
+ if utils.YB_VOYAGER_VERSION != "main" { + versionCheckFailed = true + } + } else if msrVoyagerVersionString != "" { + msrVoyagerFinalVersion := msrVoyagerVersionString + if strings.Contains(msrVoyagerFinalVersion, "rc") { + msrVoyagerFinalVersion, err = utils.GetFinalReleaseVersionFromRCVersion(msrVoyagerFinalVersion) + if err != nil { + utils.ErrExit("could not get final release version from rc version %q: %v", msrVoyagerFinalVersion, err) + } + } + + msrVoyagerVersion, err := version.NewVersion(msrVoyagerFinalVersion) + if err != nil { + utils.ErrExit("could not create version from %q: %v", msrVoyagerFinalVersion, err) + } + + if msrVoyagerVersion.LessThan(previousBreakingChangeVersion) { + versionCheckFailed = true + } else { + // If the export-dir was created using a version greater than or equal to the PREVIOUS_BREAKING_CHANGE_VERSION, + // then if the current voyager version does not match the export-dir version, then just print a note warning the user. + noteString := fmt.Sprintf(color.YellowString("Note: The export-dir %q was created using voyager version %q. "+ + "The current version is %q."), + migrationExportDir, msrVoyagerVersionString, utils.YB_VOYAGER_VERSION) + + if utils.YB_VOYAGER_VERSION == "main" { + // In this case we won't be able to convert the version using version.NewVersion() as "main" is not a valid version. + // Moreover, we know here that the msrVoyagerVersion is not "main" as we have already handled that case above. + // Therefore, the current version and the msrVoyagerVersion will not be equal. 
+ utils.PrintAndLog("%s", noteString) + } else { + currentVersion, err := version.NewVersion(utils.YB_VOYAGER_VERSION) + if err != nil { + utils.ErrExit("could not create version from %q: %v", utils.YB_VOYAGER_VERSION, err) + } + if !currentVersion.Equal(msrVoyagerVersion) { + utils.PrintAndLog("%s", noteString) + } + } + } } - if msr.VoyagerVersion != utils.YB_VOYAGER_VERSION { - userFacingMsg := fmt.Sprintf("Voyager requires the entire migration workflow to be executed using a single Voyager version.\n"+ - "The export-dir %q was created using version %q and the current version is %q. Either use Voyager %q to continue the migration or start afresh "+ - "with a new export-dir.", migrationExportDir, msr.VoyagerVersion, utils.YB_VOYAGER_VERSION, msr.VoyagerVersion) - if msr.VoyagerVersion == "" { //In case the export dir is already started from older version that will not have VoyagerVersion field in MSR - userFacingMsg = fmt.Sprintf("Voyager requires the entire migration workflow to be executed using a single Voyager version.\n"+ - "The export-dir %q was created using older version and the current version is %q. Either use older version to continue the migration or start afresh "+ - "with a new export-dir.", migrationExportDir, utils.YB_VOYAGER_VERSION) + + if versionCheckFailed { + userFacingMsg := fmt.Sprintf("\nThe export-dir %q was created using voyager version %q. "+ + "However, the current version %q requires the export-dir to be created using version %q or later. "+ + "Either use a compatible version to continue the migration or start afresh with a new export-dir. ", + migrationExportDir, msrVoyagerVersionString, utils.YB_VOYAGER_VERSION, utils.PREVIOUS_BREAKING_CHANGE_VERSION) + if msrVoyagerVersionString == "" { //In case the export dir is already started from older version that will not have VoyagerVersion field in MSR + userFacingMsg = fmt.Sprintf("\nThe export-dir %q was created using older version. 
"+ + "However, the current version %q requires the export-dir to be created using version %q or later. "+ + "Either use a compatible version to continue the migration or start afresh with a new export-dir. ", + migrationExportDir, utils.YB_VOYAGER_VERSION, utils.PREVIOUS_BREAKING_CHANGE_VERSION) } utils.ErrExit(userFacingMsg) } - return metaDBInstance } func initAssessmentDB() { @@ -877,7 +929,7 @@ func renameTableIfRequired(table string) (string, bool) { } defaultSchema, noDefaultSchema := GetDefaultPGSchema(schema, "|") if noDefaultSchema && len(strings.Split(table, ".")) <= 1 { - utils.ErrExit("no default schema found to qualify table %s", table) + utils.ErrExit("no default schema found to qualify table: %s", table) } tableName := sqlname.NewSourceNameFromMaybeQualifiedName(table, defaultSchema) fromTable := tableName.Qualified.Unquoted @@ -885,7 +937,7 @@ func renameTableIfRequired(table string) (string, bool) { if renameTablesMap[fromTable] != "" { tableTup, err := namereg.NameReg.LookupTableName(renameTablesMap[fromTable]) if err != nil { - utils.ErrExit("lookup failed for the table %s", renameTablesMap[fromTable]) + utils.ErrExit("lookup failed for the table: %s", renameTablesMap[fromTable]) } return tableTup.ForMinOutput(), true @@ -898,10 +950,6 @@ func getExportedSnapshotRowsMap(exportSnapshotStatus *ExportSnapshotStatus) (*ut snapshotStatusMap := utils.NewStructMap[sqlname.NameTuple, []string]() for _, tableStatus := range exportSnapshotStatus.Tables { - if tableStatus.FileName == "" { - //in case of root table as well in the tablelist during export an entry with empty file name is there - continue - } nt, err := namereg.NameReg.LookupTableName(tableStatus.TableName) if err != nil { return nil, nil, fmt.Errorf("lookup table [%s] from name registry: %v", tableStatus.TableName, err) @@ -999,157 +1047,56 @@ func storeTableListInMSR(tableList []sqlname.NameTuple) error { return nil } -var ( - UNSUPPORTED_DATATYPE_XML_ISSUE = fmt.Sprintf("%s - xml", 
UNSUPPORTED_DATATYPE) - UNSUPPORTED_DATATYPE_XID_ISSUE = fmt.Sprintf("%s - xid", UNSUPPORTED_DATATYPE) - APP_CHANGES_HIGH_THRESHOLD = 5 - APP_CHANGES_MEDIUM_THRESHOLD = 1 - SCHEMA_CHANGES_HIGH_THRESHOLD = math.MaxInt32 - SCHEMA_CHANGES_MEDIUM_THRESHOLD = 20 -) - -var appChanges = []string{ - INHERITANCE_ISSUE_REASON, - CONVERSION_ISSUE_REASON, - DEFERRABLE_CONSTRAINT_ISSUE, - UNSUPPORTED_DATATYPE_XML_ISSUE, - UNSUPPORTED_DATATYPE_XID_ISSUE, - UNSUPPORTED_EXTENSION_ISSUE, // will confirm this -} - -func readEnvForAppOrSchemaCounts() { - APP_CHANGES_HIGH_THRESHOLD = utils.GetEnvAsInt("APP_CHANGES_HIGH_THRESHOLD", APP_CHANGES_HIGH_THRESHOLD) - APP_CHANGES_MEDIUM_THRESHOLD = utils.GetEnvAsInt("APP_CHANGES_MEDIUM_THRESHOLD", APP_CHANGES_MEDIUM_THRESHOLD) - SCHEMA_CHANGES_HIGH_THRESHOLD = utils.GetEnvAsInt("SCHEMA_CHANGES_HIGH_THRESHOLD", SCHEMA_CHANGES_HIGH_THRESHOLD) - SCHEMA_CHANGES_MEDIUM_THRESHOLD = utils.GetEnvAsInt("SCHEMA_CHANGES_MEDIUM_THRESHOLD", SCHEMA_CHANGES_MEDIUM_THRESHOLD) -} - -// Migration complexity calculation from the conversion issues -func getMigrationComplexity(sourceDBType string, schemaDirectory string, analysisReport utils.SchemaReport) string { - if analysisReport.MigrationComplexity != "" { - return analysisReport.MigrationComplexity - } - - if sourceDBType == ORACLE { - mc, err := getMigrationComplexityForOracle(schemaDirectory) - if err != nil { - log.Errorf("failed to get migration complexity for oracle: %v", err) - return "NOT AVAILABLE" - } - return mc - } else if sourceDBType != POSTGRESQL { - return "NOT AVAILABLE" - } - - log.Infof("Calculating migration complexity..") - readEnvForAppOrSchemaCounts() - appChangesCount := 0 - for _, issue := range schemaAnalysisReport.Issues { - for _, appChange := range appChanges { - if strings.Contains(issue.Reason, appChange) { - appChangesCount++ - } - } - } - schemaChangesCount := len(schemaAnalysisReport.Issues) - appChangesCount - - if appChangesCount > APP_CHANGES_HIGH_THRESHOLD || 
schemaChangesCount > SCHEMA_CHANGES_HIGH_THRESHOLD { - return HIGH - } else if appChangesCount > APP_CHANGES_MEDIUM_THRESHOLD || schemaChangesCount > SCHEMA_CHANGES_MEDIUM_THRESHOLD { - return MEDIUM - } - //LOW in case appChanges == 0 or schemaChanges [0-20] - return LOW -} - -// This is a temporary logic to get migration complexity for oracle based on the migration level from ora2pg report. -// Ideally, we should ALSO be considering the schema analysis report to get the migration complexity. -func getMigrationComplexityForOracle(schemaDirectory string) (string, error) { - ora2pgReportPath := filepath.Join(schemaDirectory, "ora2pg_report.csv") - if !utils.FileOrFolderExists(ora2pgReportPath) { - return "", fmt.Errorf("ora2pg report file not found at %s", ora2pgReportPath) - } - file, err := os.Open(ora2pgReportPath) - if err != nil { - return "", fmt.Errorf("failed to read file %s: %w", ora2pgReportPath, err) - } - defer func() { - if err := file.Close(); err != nil { - log.Errorf("Error while closing file %s: %v", ora2pgReportPath, err) - } - }() - // Sample file contents - - // "dbi:Oracle:(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = xyz)(PORT = 1521))(CONNECT_DATA = (SERVICE_NAME = DMS)))"; - // "Oracle Database 19c Enterprise Edition Release 19.0.0.0.0";"ASSESS_MIGRATION";"261.62 MB";"1 person-day(s)";"A-2"; - // "0/0/0.00";"0/0/0";"0/0/0";"25/0/6.50";"0/0/0.00";"0/0/0";"0/0/0";"0/0/0";"0/0/0";"3/0/1.00";"3/0/1.00"; - // "44/0/4.90";"27/0/2.70";"9/0/1.80";"4/0/16.00";"5/0/3.00";"2/0/2.00";"125/0/58.90" - // - // X/Y/Z - total/invalid/cost for each type of objects(table,function,etc). Last data element is the sum total. - // total cost = 58.90 units (1 unit = 5 minutes). Therefore total cost is approx 1 person-days. - // column 6 is Migration level. 
- // Migration levels: - // A - Migration that might be run automatically - // B - Migration with code rewrite and a human-days cost up to 5 days - // C - Migration with code rewrite and a human-days cost above 5 days - // Technical levels: - // 1 = trivial: no stored functions and no triggers - // 2 = easy: no stored functions but with triggers, no manual rewriting - // 3 = simple: stored functions and/or triggers, no manual rewriting - // 4 = manual: no stored functions but with triggers or views with code rewriting - // 5 = difficult: stored functions and/or triggers with code rewriting - reader := csv.NewReader(file) - reader.Comma = ';' - rows, err := reader.ReadAll() - if err != nil { - log.Errorf("error reading csv file %s: %v", ora2pgReportPath, err) - return "", fmt.Errorf("error reading csv file %s: %w", ora2pgReportPath, err) - } - if len(rows) > 1 { - return "", fmt.Errorf("invalid ora2pg report file format. Expected 1 row, found %d. contents = %v", len(rows), rows) - } - reportData := rows[0] - migrationLevel := strings.Split(reportData[5], "-")[0] - - switch migrationLevel { - case "A": - return LOW, nil - case "B": - return MEDIUM, nil - case "C": - return HIGH, nil - default: - return "", fmt.Errorf("invalid migration level [%s] found in ora2pg report %v", migrationLevel, reportData) - } -} - // ===================================================================== // TODO: consider merging all unsupported field with single AssessmentReport struct member as AssessmentIssue type AssessmentReport struct { - VoyagerVersion string `json:"VoyagerVersion"` - MigrationComplexity string `json:"MigrationComplexity"` - SchemaSummary utils.SchemaSummary `json:"SchemaSummary"` - Sizing *migassessment.SizingAssessmentReport `json:"Sizing"` - UnsupportedDataTypes []utils.TableColumnsDataTypes `json:"UnsupportedDataTypes"` - UnsupportedDataTypesDesc string `json:"UnsupportedDataTypesDesc"` - UnsupportedFeatures []UnsupportedFeature `json:"UnsupportedFeatures"` - 
UnsupportedFeaturesDesc string `json:"UnsupportedFeaturesDesc"` - MigrationCaveats []UnsupportedFeature `json:"MigrationCaveats"` - UnsupportedQueryConstructs []utils.UnsupportedQueryConstruct `json:"UnsupportedQueryConstructs"` - TableIndexStats *[]migassessment.TableIndexStats `json:"TableIndexStats"` - Notes []string `json:"Notes"` + VoyagerVersion string `json:"VoyagerVersion"` + TargetDBVersion *ybversion.YBVersion `json:"TargetDBVersion"` + MigrationComplexity string `json:"MigrationComplexity"` + MigrationComplexityExplanation string `json:"MigrationComplexityExplanation"` + SchemaSummary utils.SchemaSummary `json:"SchemaSummary"` + Sizing *migassessment.SizingAssessmentReport `json:"Sizing"` + Issues []AssessmentIssue `json:"-"` // disabled in reports till corresponding UI changes are done(json and html reports) + TableIndexStats *[]migassessment.TableIndexStats `json:"TableIndexStats"` + Notes []string `json:"Notes"` + + // fields going to be deprecated + UnsupportedDataTypes []utils.TableColumnsDataTypes `json:"UnsupportedDataTypes"` + UnsupportedDataTypesDesc string `json:"UnsupportedDataTypesDesc"` + UnsupportedFeatures []UnsupportedFeature `json:"UnsupportedFeatures"` + UnsupportedFeaturesDesc string `json:"UnsupportedFeaturesDesc"` + UnsupportedQueryConstructs []utils.UnsupportedQueryConstruct `json:"UnsupportedQueryConstructs"` + UnsupportedPlPgSqlObjects []UnsupportedFeature `json:"UnsupportedPlPgSqlObjects"` + MigrationCaveats []UnsupportedFeature `json:"MigrationCaveats"` +} + +// Fields apart from Category, CategoryDescription, TypeName and Impact will be populated only if/when available +type AssessmentIssue struct { + Category string // expected values: feature, query_constrcuts, migration_caveats, plpgsql_objects, datatytpe + CategoryDescription string + Type string // Ex: GIN_INDEXES, SECURITY_INVOKER_VIEWS, STORED_GENERATED_COLUMNS + Name string // Ex: "Stored generated columns are not supported." 
+ Description string + Impact string // Level-1, Level-2, Level-3 (default: Level-1 ??) + ObjectType string // For datatype category, ObjectType will be datatype (for eg "geometry") + ObjectName string + SqlStatement string + DocsLink string + MinimumVersionFixedIn map[string]*ybversion.YBVersion } type UnsupportedFeature struct { - FeatureName string `json:"FeatureName"` - Objects []ObjectInfo `json:"Objects"` - DisplayDDL bool `json:"-"` // just used by html format to display the DDL for some feature and object names for other - DocsLink string `json:"DocsLink,omitempty"` - FeatureDescription string `json:"FeatureDescription,omitempty"` + FeatureName string `json:"FeatureName"` + Objects []ObjectInfo `json:"Objects"` + DisplayDDL bool `json:"-"` // just used by html format to display the DDL for some feature and object names for other + DocsLink string `json:"DocsLink,omitempty"` + FeatureDescription string `json:"FeatureDescription,omitempty"` + MinimumVersionsFixedIn map[string]*ybversion.YBVersion `json:"MinimumVersionsFixedIn"` // key: series (2024.1, 2.21, etc) } type ObjectInfo struct { + ObjectType string `json:"ObjectType,omitempty"` ObjectName string SqlStatement string } @@ -1184,24 +1131,26 @@ type AssessMigrationDBConfig struct { type AssessMigrationPayload struct { PayloadVersion string VoyagerVersion string + TargetDBVersion *ybversion.YBVersion MigrationComplexity string SchemaSummary utils.SchemaSummary - AssessmentIssues []AssessmentIssuePayload + AssessmentIssues []AssessmentIssueYugabyteD SourceSizeDetails SourceDBSizeDetails TargetRecommendations TargetSizingRecommendations - ConversionIssues []utils.Issue + ConversionIssues []utils.AnalyzeSchemaIssue // Depreacted: AssessmentJsonReport is depricated; use the fields directly inside struct AssessmentJsonReport AssessmentReport } -type AssessmentIssuePayload struct { - Type string `json:"Type"` // Feature, DataType, MigrationCaveat, UQC - TypeDescription string `json:"TypeDescription"` // Based 
on AssessmentIssue type - Subtype string `json:"Subtype"` // GIN Indexes, Advisory Locks etc - SubtypeDescription string `json:"SubtypeDescription"` // description based on subtype - ObjectName string `json:"ObjectName"` // Fully qualified object name(empty if NA, eg UQC) - SqlStatement string `json:"SqlStatement"` // DDL or DML(UQC) - DocsLink string `json:"DocsLink"` // docs link based on the subtype +type AssessmentIssueYugabyteD struct { + Type string `json:"Type"` // Feature, DataType, MigrationCaveat, UQC + TypeDescription string `json:"TypeDescription"` // Based on AssessmentIssue type + Subtype string `json:"Subtype"` // GIN Indexes, Advisory Locks etc + SubtypeDescription string `json:"SubtypeDescription"` // description based on subtype + ObjectName string `json:"ObjectName"` // Fully qualified object name(empty if NA, eg UQC) + SqlStatement string `json:"SqlStatement"` // DDL or DML(UQC) + DocsLink string `json:"DocsLink"` // docs link based on the subtype + MinimumVersionsFixedIn map[string]*ybversion.YBVersion `json:"MinimumVersionsFixedIn"` // key: series (2024.1, 2.21, etc) // Store Type-specific details - extensible, can refer any struct Details json.RawMessage `json:"Details,omitempty"` @@ -1228,7 +1177,7 @@ type TargetSizingRecommendations struct { TotalShardedSize int64 } -var ASSESS_MIGRATION_PAYLOAD_VERSION = "1.0" +var ASSESS_MIGRATION_PAYLOAD_VERSION = "1.1" //====== AssesmentReport struct methods ======// @@ -1242,6 +1191,10 @@ func ParseJSONToAssessmentReport(reportPath string) (*AssessmentReport, error) { return &report, nil } +func (ar *AssessmentReport) AppendIssues(issues ...AssessmentIssue) { + ar.Issues = append(ar.Issues, issues...) 
+} + func (ar *AssessmentReport) GetShardedTablesRecommendation() ([]string, error) { if ar.Sizing == nil { return nil, fmt.Errorf("sizing report is null, can't fetch sharded tables") @@ -1461,31 +1414,41 @@ func PackAndSendCallhomePayloadOnExit() { if callHomeErrorOrCompletePayloadSent { return } + + var errorMsg string + var status string + if utils.ErrExitErr != nil { + errorMsg = utils.ErrExitErr.Error() + status = ERROR + } else { + status = EXIT + } + switch currentCommand { case assessMigrationCmd.CommandPath(): - packAndSendAssessMigrationPayload(EXIT, "Exiting....") + packAndSendAssessMigrationPayload(status, errorMsg) case assessMigrationBulkCmd.CommandPath(): - packAndSendAssessMigrationBulkPayload(EXIT) + packAndSendAssessMigrationBulkPayload(status, errorMsg) case exportSchemaCmd.CommandPath(): - packAndSendExportSchemaPayload(EXIT) + packAndSendExportSchemaPayload(status, errorMsg) case analyzeSchemaCmd.CommandPath(): - packAndSendAnalyzeSchemaPayload(EXIT) + packAndSendAnalyzeSchemaPayload(status, errorMsg) case importSchemaCmd.CommandPath(): - packAndSendImportSchemaPayload(EXIT, "Exiting....") + packAndSendImportSchemaPayload(status, errorMsg) case exportDataCmd.CommandPath(), exportDataFromSrcCmd.CommandPath(): - packAndSendExportDataPayload(EXIT) + packAndSendExportDataPayload(status, errorMsg) case exportDataFromTargetCmd.CommandPath(): - packAndSendExportDataFromTargetPayload(EXIT) + packAndSendExportDataFromTargetPayload(status, errorMsg) case importDataCmd.CommandPath(), importDataToTargetCmd.CommandPath(): - packAndSendImportDataPayload(EXIT) + packAndSendImportDataPayload(status, errorMsg) case importDataToSourceCmd.CommandPath(): - packAndSendImportDataToSourcePayload(EXIT) + packAndSendImportDataToSourcePayload(status, errorMsg) case importDataToSourceReplicaCmd.CommandPath(): - packAndSendImportDataToSrcReplicaPayload(EXIT) + packAndSendImportDataToSrcReplicaPayload(status, errorMsg) case endMigrationCmd.CommandPath(): - 
packAndSendEndMigrationPayload(EXIT) + packAndSendEndMigrationPayload(status, errorMsg) case importDataFileCmd.CommandPath(): - packAndSendImportDataFilePayload(EXIT) + packAndSendImportDataFilePayload(status, errorMsg) } } @@ -1547,17 +1510,17 @@ func sendCallhomePayloadAtIntervals() { time.Sleep(15 * time.Minute) switch currentCommand { case exportDataCmd.CommandPath(), exportDataFromSrcCmd.CommandPath(): - packAndSendExportDataPayload(INPROGRESS) + packAndSendExportDataPayload(INPROGRESS, "") case exportDataFromTargetCmd.CommandPath(): - packAndSendExportDataFromTargetPayload(INPROGRESS) + packAndSendExportDataFromTargetPayload(INPROGRESS, "") case importDataCmd.CommandPath(), importDataToTargetCmd.CommandPath(): - packAndSendImportDataPayload(INPROGRESS) + packAndSendImportDataPayload(INPROGRESS, "") case importDataToSourceCmd.CommandPath(): - packAndSendImportDataToSourcePayload(INPROGRESS) + packAndSendImportDataToSourcePayload(INPROGRESS, "") case importDataToSourceReplicaCmd.CommandPath(): - packAndSendImportDataToSrcReplicaPayload(INPROGRESS) + packAndSendImportDataToSrcReplicaPayload(INPROGRESS, "") case importDataFileCmd.CommandPath(): - packAndSendImportDataFilePayload(INPROGRESS) + packAndSendImportDataFilePayload(INPROGRESS, "") } } } diff --git a/yb-voyager/cmd/common_test.go b/yb-voyager/cmd/common_test.go new file mode 100644 index 0000000000..9fd9635f3a --- /dev/null +++ b/yb-voyager/cmd/common_test.go @@ -0,0 +1,450 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package cmd + +import ( + "fmt" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/migassessment" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +func TestAssessmentReportStructs(t *testing.T) { + tests := []struct { + name string + actualType reflect.Type + expectedType interface{} + }{ + { + name: "Validate DBObject Struct Definition", + actualType: reflect.TypeOf(utils.DBObject{}), + expectedType: struct { + ObjectType string `json:"ObjectType"` + TotalCount int `json:"TotalCount"` + InvalidCount int `json:"InvalidCount"` + ObjectNames string `json:"ObjectNames"` + Details string `json:"Details,omitempty"` + }{}, + }, + { + name: "Validate SchemaSummary Struct Definition", + actualType: reflect.TypeOf(utils.SchemaSummary{}), + expectedType: struct { + Description string `json:"Description"` + DBName string `json:"DbName"` + SchemaNames []string `json:"SchemaNames"` + DBVersion string `json:"DbVersion"` + Notes []string `json:"Notes,omitempty"` + DBObjects []utils.DBObject `json:"DatabaseObjects"` + }{}, + }, + { + name: "Validate SizingRecommendation Struct Definition", + actualType: reflect.TypeOf(migassessment.SizingRecommendation{}), + expectedType: struct { + ColocatedTables []string + ColocatedReasoning string + ShardedTables []string + NumNodes float64 + VCPUsPerInstance int + MemoryPerInstance int + OptimalSelectConnectionsPerNode int64 + OptimalInsertConnectionsPerNode int64 + EstimatedTimeInMinForImport float64 + ParallelVoyagerJobs float64 + }{}, + }, + { + name: "Validate TableColumnsDataTypes Struct Definition", + actualType: reflect.TypeOf(utils.TableColumnsDataTypes{}), + expectedType: struct { + SchemaName string `json:"SchemaName"` + TableName string 
`json:"TableName"` + ColumnName string `json:"ColumnName"` + DataType string `json:"DataType"` + }{}, + }, + { + name: "Validate UnsupportedFeature Struct Definition", + actualType: reflect.TypeOf(UnsupportedFeature{}), + expectedType: struct { + FeatureName string `json:"FeatureName"` + Objects []ObjectInfo `json:"Objects"` + DisplayDDL bool `json:"-"` + DocsLink string `json:"DocsLink,omitempty"` + FeatureDescription string `json:"FeatureDescription,omitempty"` + MinimumVersionsFixedIn map[string]*ybversion.YBVersion `json:"MinimumVersionsFixedIn"` + }{}, + }, + { + name: "Validate UnsupportedQueryConstruct Struct Definition", + actualType: reflect.TypeOf(utils.UnsupportedQueryConstruct{}), + expectedType: struct { + ConstructTypeName string + Query string + DocsLink string + MinimumVersionsFixedIn map[string]*ybversion.YBVersion + }{}, + }, + { + name: "Validate TableIndexStats Struct Definition", + actualType: reflect.TypeOf(migassessment.TableIndexStats{}), + expectedType: struct { + SchemaName string `json:"SchemaName"` + ObjectName string `json:"ObjectName"` + RowCount *int64 `json:"RowCount"` // Pointer to allows null values + ColumnCount *int64 `json:"ColumnCount"` + Reads *int64 `json:"Reads"` + Writes *int64 `json:"Writes"` + ReadsPerSecond *int64 `json:"ReadsPerSecond"` + WritesPerSecond *int64 `json:"WritesPerSecond"` + IsIndex bool `json:"IsIndex"` + ObjectType string `json:"ObjectType"` + ParentTableName *string `json:"ParentTableName"` + SizeInBytes *int64 `json:"SizeInBytes"` + }{}, + }, + { + name: "Validate AssessmentReport Struct Definition", + actualType: reflect.TypeOf(AssessmentReport{}), + expectedType: struct { + VoyagerVersion string `json:"VoyagerVersion"` + TargetDBVersion *ybversion.YBVersion `json:"TargetDBVersion"` + MigrationComplexity string `json:"MigrationComplexity"` + MigrationComplexityExplanation string `json:"MigrationComplexityExplanation"` + SchemaSummary utils.SchemaSummary `json:"SchemaSummary"` + Sizing 
*migassessment.SizingAssessmentReport `json:"Sizing"` + Issues []AssessmentIssue `json:"-"` + TableIndexStats *[]migassessment.TableIndexStats `json:"TableIndexStats"` + Notes []string `json:"Notes"` + UnsupportedDataTypes []utils.TableColumnsDataTypes `json:"UnsupportedDataTypes"` + UnsupportedDataTypesDesc string `json:"UnsupportedDataTypesDesc"` + UnsupportedFeatures []UnsupportedFeature `json:"UnsupportedFeatures"` + UnsupportedFeaturesDesc string `json:"UnsupportedFeaturesDesc"` + UnsupportedQueryConstructs []utils.UnsupportedQueryConstruct `json:"UnsupportedQueryConstructs"` + UnsupportedPlPgSqlObjects []UnsupportedFeature `json:"UnsupportedPlPgSqlObjects"` + MigrationCaveats []UnsupportedFeature `json:"MigrationCaveats"` + }{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testutils.CompareStructs(t, tt.actualType, reflect.TypeOf(tt.expectedType), tt.name) + }) + } +} + +func TestAssessmentReportJson(t *testing.T) { + reportDir := filepath.Join(os.TempDir(), "assessment_report_test") + reportPath := filepath.Join(reportDir, fmt.Sprintf("%s%s", ASSESSMENT_FILE_NAME, JSON_EXTENSION)) + + newYbVersion, err := ybversion.NewYBVersion("2024.1.1.1") + if err != nil { + t.Fatalf("Failed to create new YBVersion: %v", err) + } + + assessmentReport = AssessmentReport{ + VoyagerVersion: "v1.0.0", + TargetDBVersion: newYbVersion, + MigrationComplexity: "High", + MigrationComplexityExplanation: "", + SchemaSummary: utils.SchemaSummary{ + Description: "Test Schema Summary", + DBName: "test_db", + SchemaNames: []string{"public"}, + DBVersion: "13.3", + DBObjects: []utils.DBObject{ + { + ObjectType: "Table", + TotalCount: 1, + InvalidCount: 0, + ObjectNames: "test_table", + }, + }, + }, + Sizing: &migassessment.SizingAssessmentReport{ + SizingRecommendation: migassessment.SizingRecommendation{ + ColocatedTables: []string{"test_table"}, + ColocatedReasoning: "Test reasoning", + ShardedTables: []string{"test_table"}, + NumNodes: 3, + 
VCPUsPerInstance: 4, + MemoryPerInstance: 16, + OptimalSelectConnectionsPerNode: 10, + OptimalInsertConnectionsPerNode: 10, + EstimatedTimeInMinForImport: 10, + ParallelVoyagerJobs: 10, + }, + FailureReasoning: "Test failure reasoning", + }, + Issues: nil, + TableIndexStats: &[]migassessment.TableIndexStats{ + { + SchemaName: "public", + ObjectName: "test_table", + RowCount: Int64Ptr(100), + ColumnCount: Int64Ptr(10), + Reads: Int64Ptr(100), + Writes: Int64Ptr(100), + ReadsPerSecond: Int64Ptr(10), + WritesPerSecond: Int64Ptr(10), + IsIndex: true, + ObjectType: "Table", + ParentTableName: StringPtr("parent_table"), + SizeInBytes: Int64Ptr(1024), + }, + }, + Notes: []string{"Test note"}, + UnsupportedDataTypes: []utils.TableColumnsDataTypes{ + { + SchemaName: "public", + TableName: "test_table", + ColumnName: "test_column", + DataType: "test_type", + }, + }, + UnsupportedDataTypesDesc: "Test unsupported data types", + UnsupportedFeatures: []UnsupportedFeature{ + { + FeatureName: "test_feature", + Objects: []ObjectInfo{ + { + ObjectName: "test_object", + ObjectType: "test_type", + SqlStatement: "test_sql", + }, + }, + DisplayDDL: true, + DocsLink: "https://test.com", + FeatureDescription: "Test feature description", + MinimumVersionsFixedIn: map[string]*ybversion.YBVersion{"2024.1.1": newYbVersion}, + }, + }, + UnsupportedFeaturesDesc: "Test unsupported features", + UnsupportedQueryConstructs: []utils.UnsupportedQueryConstruct{ + { + ConstructTypeName: "test_construct", + Query: "test_query", + DocsLink: "https://test.com", + MinimumVersionsFixedIn: map[string]*ybversion.YBVersion{"2024.1.1": newYbVersion}, + }, + }, + UnsupportedPlPgSqlObjects: []UnsupportedFeature{ + { + FeatureName: "test_feature", + Objects: []ObjectInfo{ + { + ObjectName: "test_object", + ObjectType: "test_type", + SqlStatement: "test_sql", + }, + }, + DisplayDDL: true, + DocsLink: "https://test.com", + FeatureDescription: "Test feature description", + MinimumVersionsFixedIn: 
map[string]*ybversion.YBVersion{"2024.1.1": newYbVersion}, + }, + }, + MigrationCaveats: []UnsupportedFeature{ + { + FeatureName: "test_feature", + Objects: []ObjectInfo{ + { + ObjectName: "test_object", + ObjectType: "test_type", + SqlStatement: "test_sql", + }, + }, + DisplayDDL: true, + DocsLink: "https://test.com", + FeatureDescription: "Test feature description", + MinimumVersionsFixedIn: map[string]*ybversion.YBVersion{"2024.1.1": newYbVersion}, + }, + }, + } + + // Make the report directory + err = os.MkdirAll(reportDir, 0755) + if err != nil { + t.Fatalf("Failed to create report directory: %v", err) + } + + // Clean up the report directory + defer func() { + err := os.RemoveAll(reportDir) + if err != nil { + t.Fatalf("Failed to remove report directory: %v", err) + } + }() + + // Write the assessment report to a JSON file + err = generateAssessmentReportJson(reportDir) + if err != nil { + t.Fatalf("Failed to write assessment report to JSON file: %v", err) + } + // expected JSON + expectedJSON := `{ + "VoyagerVersion": "v1.0.0", + "TargetDBVersion": "2024.1.1.1", + "MigrationComplexity": "High", + "MigrationComplexityExplanation": "", + "SchemaSummary": { + "Description": "Test Schema Summary", + "DbName": "test_db", + "SchemaNames": [ + "public" + ], + "DbVersion": "13.3", + "DatabaseObjects": [ + { + "ObjectType": "Table", + "TotalCount": 1, + "InvalidCount": 0, + "ObjectNames": "test_table" + } + ] + }, + "Sizing": { + "SizingRecommendation": { + "ColocatedTables": [ + "test_table" + ], + "ColocatedReasoning": "Test reasoning", + "ShardedTables": [ + "test_table" + ], + "NumNodes": 3, + "VCPUsPerInstance": 4, + "MemoryPerInstance": 16, + "OptimalSelectConnectionsPerNode": 10, + "OptimalInsertConnectionsPerNode": 10, + "EstimatedTimeInMinForImport": 10, + "ParallelVoyagerJobs": 10 + }, + "FailureReasoning": "Test failure reasoning" + }, + "TableIndexStats": [ + { + "SchemaName": "public", + "ObjectName": "test_table", + "RowCount": 100, + "ColumnCount": 10, 
+ "Reads": 100, + "Writes": 100, + "ReadsPerSecond": 10, + "WritesPerSecond": 10, + "IsIndex": true, + "ObjectType": "Table", + "ParentTableName": "parent_table", + "SizeInBytes": 1024 + } + ], + "Notes": [ + "Test note" + ], + "UnsupportedDataTypes": [ + { + "SchemaName": "public", + "TableName": "test_table", + "ColumnName": "test_column", + "DataType": "test_type" + } + ], + "UnsupportedDataTypesDesc": "Test unsupported data types", + "UnsupportedFeatures": [ + { + "FeatureName": "test_feature", + "Objects": [ + { + "ObjectType": "test_type", + "ObjectName": "test_object", + "SqlStatement": "test_sql" + } + ], + "DocsLink": "https://test.com", + "FeatureDescription": "Test feature description", + "MinimumVersionsFixedIn": { + "2024.1.1": "2024.1.1.1" + } + } + ], + "UnsupportedFeaturesDesc": "Test unsupported features", + "UnsupportedQueryConstructs": [ + { + "ConstructTypeName": "test_construct", + "Query": "test_query", + "DocsLink": "https://test.com", + "MinimumVersionsFixedIn": { + "2024.1.1": "2024.1.1.1" + } + } + ], + "UnsupportedPlPgSqlObjects": [ + { + "FeatureName": "test_feature", + "Objects": [ + { + "ObjectType": "test_type", + "ObjectName": "test_object", + "SqlStatement": "test_sql" + } + ], + "DocsLink": "https://test.com", + "FeatureDescription": "Test feature description", + "MinimumVersionsFixedIn": { + "2024.1.1": "2024.1.1.1" + } + } + ], + "MigrationCaveats": [ + { + "FeatureName": "test_feature", + "Objects": [ + { + "ObjectType": "test_type", + "ObjectName": "test_object", + "SqlStatement": "test_sql" + } + ], + "DocsLink": "https://test.com", + "FeatureDescription": "Test feature description", + "MinimumVersionsFixedIn": { + "2024.1.1": "2024.1.1.1" + } + } + ] +}` + + testutils.CompareJson(t, reportPath, expectedJSON, reportDir) + +} + +func Int64Ptr(i int64) *int64 { + return &i +} + +func StringPtr(s string) *string { + return &s +} diff --git a/yb-voyager/cmd/constants.go b/yb-voyager/cmd/constants.go index 1470c873da..25ee2ecba8 
100644 --- a/yb-voyager/cmd/constants.go +++ b/yb-voyager/cmd/constants.go @@ -15,6 +15,11 @@ limitations under the License. */ package cmd +import ( + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" +) + const ( KB = 1024 MB = 1024 * 1024 @@ -57,9 +62,6 @@ const ( ROW_UPDATE_STATUS_IN_PROGRESS = 1 ROW_UPDATE_STATUS_COMPLETED = 3 COLOCATION_CLAUSE = "colocation" - LOW = "LOW" - MEDIUM = "MEDIUM" - HIGH = "HIGH" //phase names used in call-home payload ANALYZE_PHASE = "analyze-schema" EXPORT_SCHEMA_PHASE = "export-schema" @@ -93,15 +95,21 @@ const ( CUTOVER_TO_SOURCE = "cutover-to-source" CUTOVER_TO_SOURCE_REPLICA = "cutover-to-source-replica" + CLIENT_MESSAGES_SESSION_VAR = "SET CLIENT_MIN_MESSAGES" + TRANSACTION_TIMEOUT_SESSION_VAR = "SET TRANSACTION_TIMEOUT" + // unsupported features of assess migration VIRTUAL_COLUMN = "VIRTUAL COLUMN" INHERITED_TYPE = "INHERITED TYPE" REFERENCE_PARTITION = "REFERENCE PARTITION" SYSTEM_PARTITION = "SYSTEM PARTITION" - UNSUPPORTED_FEATURES = "unsupported_features" - UNSUPPORTED_DATATYPES = "unsupported_datatypes" - REPORT_UNSUPPORTED_QUERY_CONSTRUCTS = "REPORT_UNSUPPORTED_QUERY_CONSTRUCTS" + UNSUPPORTED_FEATURES_CATEGORY = "unsupported_features" + UNSUPPORTED_DATATYPES_CATEGORY = "unsupported_datatypes" + UNSUPPORTED_QUERY_CONSTRUCTS_CATEGORY = "unsupported_query_constructs" + UNSUPPORTED_PLPGSQL_OBJECTS_CATEGORY = "unsupported_plpgsql_objects" + MIGRATION_CAVEATS_CATEGORY = "migration_caveats" + REPORT_UNSUPPORTED_QUERY_CONSTRUCTS = "REPORT_UNSUPPORTED_QUERY_CONSTRUCTS" HTML = "html" JSON = "json" @@ -156,6 +164,7 @@ const ( PARTITION_KEY_NOT_PK_DOC_LINK = DOCS_LINK_PREFIX + ORACLE_PREFIX + "#partition-key-column-not-part-of-primary-key-columns" DROP_TEMP_TABLE_DOC_LINK = DOCS_LINK_PREFIX + MYSQL_PREFIX + "#drop-temporary-table-statements-are-not-supported" INDEX_ON_UNSUPPORTED_TYPE = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + 
"#indexes-on-some-complex-data-types-are-not-supported" + PK_UK_CONSTRAINT_ON_UNSUPPORTED_TYPE = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#indexes-on-some-complex-data-types-are-not-supported" //Keeping it similar for now, will see if we need to a separate issue on docs UNLOGGED_TABLE_DOC_LINK = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#unlogged-table-is-not-supported" XID_DATATYPE_DOC_LINK = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#xid-functions-is-not-supported" UNSUPPORTED_DATATYPES_DOC_LINK = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#unsupported-datatypes-by-yugabytedb" @@ -168,19 +177,14 @@ const ( List of all the features we are reporting as part of Unsupported features and Migration caveats */ const ( - // Types for AssessmentIssue - FEATURE = "feature" - DATATYPE = "datatype" - QUERY_CONSTRUCT = "query_construct" // confused: in json for some values we are using space separated and for some snake_case - MIGRATION_CAVEATS = "migration_caveats" - // Description - FEATURE_ISSUE_TYPE_DESCRIPTION = "Features of the source database that are not supported on the target YugabyteDB." - DATATYPE_ISSUE_TYPE_DESCRIPTION = "Data types of the source database that are not supported on the target YugabyteDB." - MIGRATION_CAVEATS_TYPE_DESCRIPTION = "Migration Caveats highlights the current limitations with the migration workflow." - UNSUPPORTED_QUERY_CONSTRUTS_DESCRIPTION = "Source database queries not supported in YugabyteDB, identified by scanning system tables." - SCHEMA_SUMMARY_DESCRIPTION = "Objects that will be created on the target YugabyteDB." - SCHEMA_SUMMARY_DESCRIPTION_ORACLE = SCHEMA_SUMMARY_DESCRIPTION + " Some of the index and sequence names might be different from those in the source database." + FEATURE_CATEGORY_DESCRIPTION = "Features of the source database that are not supported on the target YugabyteDB." + DATATYPE_CATEGORY_DESCRIPTION = "Data types of the source database that are not supported on the target YugabyteDB." 
+ MIGRATION_CAVEATS_CATEGORY_DESCRIPTION = "Migration Caveats highlights the current limitations with the migration workflow." + UNSUPPORTED_QUERY_CONSTRUCTS_CATEGORY_DESCRIPTION = "Source database queries not supported in YugabyteDB, identified by scanning system tables." + UNSUPPPORTED_PLPGSQL_OBJECT_CATEGORY_DESCRIPTION = "Source schema objects having unsupported statements on the target YugabyteDB in PL/pgSQL code block" + SCHEMA_SUMMARY_DESCRIPTION = "Objects that will be created on the target YugabyteDB." + SCHEMA_SUMMARY_DESCRIPTION_ORACLE = SCHEMA_SUMMARY_DESCRIPTION + " Some of the index and sequence names might be different from those in the source database." //Unsupported Features @@ -208,20 +212,23 @@ const ( UNLOGGED_TABLE_FEATURE = "Unlogged tables" REFERENCING_TRIGGER_FEATURE = "REFERENCING clause for triggers" BEFORE_FOR_EACH_ROW_TRIGGERS_ON_PARTITIONED_TABLE_FEATURE = "BEFORE ROW triggers on Partitioned tables" + PK_UK_CONSTRAINT_ON_COMPLEX_DATATYPES_FEATURE = "Primary / Unique key constraints on complex datatypes" + REGEX_FUNCTIONS_FEATURE = "Regex Functions" + FETCH_WITH_TIES_FEATURE = "FETCH .. WITH TIES Clause" // Migration caveats //POSTGRESQL - ALTER_PARTITION_ADD_PK_CAVEAT_FEATURE = "Alter partitioned tables to add Primary Key" - FOREIGN_TABLE_CAVEAT_FEATURE = "Foreign tables" - POLICIES_CAVEAT_FEATURE = "Policies" - UNSUPPORTED_DATATYPES_LIVE_CAVEAT_FEATURE = "Unsupported Data Types for Live Migration" - UNSUPPORTED_DATATYPES_LIVE_WITH_FF_FB_CAVEAT_FEATURE = "Unsupported Data Types for Live Migration with Fall-forward/Fallback" - DESCRIPTION_ADD_PK_TO_PARTITION_TABLE = `After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported.` - DESCRIPTION_FOREIGN_TABLES = `During the export schema phase, SERVER and USER MAPPING objects are not exported. 
These should be manually created to make the foreign tables work.` - DESCRIPTION_POLICY_ROLE_ISSUE = `There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema.` - UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_ISSUE = "There are some data types in the schema that are not supported by live migration of data. These columns will be excluded when exporting and importing data in live migration workflows." - UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_WITH_FF_FB_ISSUE = "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. These columns will be excluded when exporting and importing data in live migration workflows." + ALTER_PARTITION_ADD_PK_CAVEAT_FEATURE = "Alter partitioned tables to add Primary Key" + FOREIGN_TABLE_CAVEAT_FEATURE = "Foreign tables" + POLICIES_CAVEAT_FEATURE = "Policies" + UNSUPPORTED_DATATYPES_LIVE_CAVEAT_FEATURE = "Unsupported Data Types for Live Migration" + UNSUPPORTED_DATATYPES_LIVE_WITH_FF_FB_CAVEAT_FEATURE = "Unsupported Data Types for Live Migration with Fall-forward/Fallback" + UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_DESCRIPTION = "There are some data types in the schema that are not supported by live migration of data. These columns will be excluded when exporting and importing data in live migration workflows." + UNSUPPORTED_DATATYPES_FOR_LIVE_MIGRATION_WITH_FF_FB_DESCRIPTION = "There are some data types in the schema that are not supported by live migration with fall-forward/fall-back. These columns will be excluded when exporting and importing data in live migration workflows." 
+ DESCRIPTION_ADD_PK_TO_PARTITION_TABLE = `After export schema, the ALTER table should be merged with CREATE table for partitioned tables as alter of partitioned tables to add primary key is not supported.` + DESCRIPTION_FOREIGN_TABLES = `During the export schema phase, SERVER and USER MAPPING objects are not exported. These should be manually created to make the foreign tables work.` + DESCRIPTION_POLICY_ROLE_DESCRIPTION = `There are some policies that are created for certain users/roles. During the export schema phase, USERs and GRANTs are not exported. Therefore, they will have to be manually created before running import schema.` ) var supportedSourceDBTypes = []string{ORACLE, MYSQL, POSTGRESQL, YUGABYTEDB} @@ -234,3 +241,22 @@ var validSSLModes = map[string][]string{ } var EVENT_BATCH_MAX_RETRY_COUNT = 50 + +// returns the description for a given assessment issue category +func GetCategoryDescription(category string) string { + switch category { + case UNSUPPORTED_FEATURES_CATEGORY, constants.FEATURE: + return FEATURE_CATEGORY_DESCRIPTION + case UNSUPPORTED_DATATYPES_CATEGORY, constants.DATATYPE: + return DATATYPE_CATEGORY_DESCRIPTION + case UNSUPPORTED_QUERY_CONSTRUCTS_CATEGORY, constants.QUERY_CONSTRUCT: + return UNSUPPORTED_QUERY_CONSTRUCTS_CATEGORY_DESCRIPTION + case UNSUPPORTED_PLPGSQL_OBJECTS_CATEGORY, constants.PLPGSQL_OBJECT: + return UNSUPPPORTED_PLPGSQL_OBJECT_CATEGORY_DESCRIPTION + case MIGRATION_CAVEATS_CATEGORY: // or constants.MIGRATION_CAVEATS (identical) + return MIGRATION_CAVEATS_CATEGORY_DESCRIPTION + default: + utils.ErrExit("ERROR: unsupported assessment issue category %q", category) + } + return "" +} diff --git a/yb-voyager/cmd/endMigrationCommand.go b/yb-voyager/cmd/endMigrationCommand.go index f016c8cbd2..61ef467ce0 100644 --- a/yb-voyager/cmd/endMigrationCommand.go +++ b/yb-voyager/cmd/endMigrationCommand.go @@ -43,14 +43,19 @@ var endMigrationCmd = &cobra.Command{ Long: "End the current migration and cleanup all metadata stored in 
databases(Target, Source-Replica and Source) and export-dir", PreRun: func(cmd *cobra.Command, args []string) { - err := validateEndMigrationFlags(cmd) + if utils.IsDirectoryEmpty(exportDir) { + utils.ErrExit("export directory is empty, nothing to end") + } + + err := retrieveMigrationUUID() + if err != nil { + utils.ErrExit("failed to get migration UUID: %w", err) + } + err = validateEndMigrationFlags(cmd) if err != nil { utils.ErrExit(err.Error()) } - if utils.IsDirectoryEmpty(exportDir) { - utils.ErrExit("export directory is empty, nothing to end") - } }, Run: endMigrationCommandFn, @@ -76,7 +81,6 @@ func endMigrationCommandFn(cmd *cobra.Command, args []string) { utils.ErrExit("error while checking streaming mode: %w\n", err) } - retrieveMigrationUUID() checkIfEndCommandCanBePerformed(msr) // backing up the state from the export directory @@ -97,10 +101,10 @@ func endMigrationCommandFn(cmd *cobra.Command, args []string) { cleanupExportDir() utils.PrintAndLog("Migration ended successfully") - packAndSendEndMigrationPayload(COMPLETE) + packAndSendEndMigrationPayload(COMPLETE, "") } -func packAndSendEndMigrationPayload(status string) { +func packAndSendEndMigrationPayload(status string, errorMsg string) { if !shouldSendCallhome() { return } @@ -115,6 +119,7 @@ func packAndSendEndMigrationPayload(status string) { BackupLogFiles: bool(backupLogFiles), BackupSchemaFiles: bool(backupSchemaFiles), SaveMigrationReports: bool(saveMigrationReports), + Error: callhome.SanitizeErrorMsg(errorMsg), } payload.PhasePayload = callhome.MarshalledJsonString(endMigrationPayload) payload.Status = status @@ -663,7 +668,7 @@ func cleanupExportDir() { for _, subdir := range subdirs { err := os.RemoveAll(filepath.Join(exportDir, subdir)) if err != nil { - utils.ErrExit("removing %s directory: %v", subdir, err) + utils.ErrExit("removing directory: %q: %v", subdir, err) } } } @@ -784,7 +789,7 @@ func stopVoyagerCommand(lockFile *lockfile.Lockfile, signal syscall.Signal) { ongoingCmd := 
lockFile.GetCmdName() ongoingCmdPID, err := lockFile.GetCmdPID() if err != nil { - utils.ErrExit("getting PID of ongoing voyager command %q: %v", ongoingCmd, err) + utils.ErrExit("getting PID of ongoing voyager command: %q: %v", ongoingCmd, err) } fmt.Printf("stopping the ongoing command: %s\n", ongoingCmd) @@ -810,7 +815,7 @@ func stopDataExportCommand(lockFile *lockfile.Lockfile) { ongoingCmd := lockFile.GetCmdName() ongoingCmdPID, err := lockFile.GetCmdPID() if err != nil { - utils.ErrExit("getting PID of ongoing voyager command %q: %v", ongoingCmd, err) + utils.ErrExit("getting PID of ongoing voyager command: %q: %v", ongoingCmd, err) } fmt.Printf("stopping the ongoing command: %s\n", ongoingCmd) @@ -829,7 +834,7 @@ func areOnDifferentFileSystems(path1 string, path2 string) bool { err2 := syscall.Stat(path2, &stat2) if err1 != nil || err2 != nil { - utils.ErrExit("getting file system info for %s and %s: %v, %v", path1, path2, err1, err2) + utils.ErrExit("getting file system info: for %s and %s: %v, %v", path1, path2, err1, err2) } return stat1.Dev != stat2.Dev diff --git a/yb-voyager/cmd/export.go b/yb-voyager/cmd/export.go index f5a43dc67f..7e7488f4cb 100644 --- a/yb-voyager/cmd/export.go +++ b/yb-voyager/cmd/export.go @@ -18,6 +18,8 @@ package cmd import ( "fmt" "os" + "os/exec" + "strconv" "strings" "github.com/spf13/cobra" @@ -32,6 +34,8 @@ import ( // source struct will be populated by CLI arguments parsing var source srcdb.Source +const MIN_REQUIRED_JAVA_VERSION = 17 + // to disable progress bar during data export and import var disablePb utils.BoolStr var exportType string @@ -39,7 +43,7 @@ var useDebezium bool var runId string var excludeTableListFilePath string var tableListFilePath string -var pgExportDependencies = []string{"pg_dump", "pg_restore", "psql"} +var pgExportCommands = []string{"pg_dump", "pg_restore", "psql"} var exportCmd = &cobra.Command{ Use: "export", @@ -295,7 +299,7 @@ func validateSSLMode() { if source.DBType == ORACLE || 
slices.Contains(validSSLModes[source.DBType], source.SSLMode) { return } else { - utils.ErrExit("Error: Invalid sslmode: %q. Valid SSL modes are %v", validSSLModes[source.DBType]) + utils.ErrExit("Invalid sslmode: %q. Valid SSL modes are %v", source.SSLMode, validSSLModes[source.DBType]) } } @@ -388,28 +392,62 @@ func saveExportTypeInMSR() { } func checkDependenciesForExport() (binaryCheckIssues []string, err error) { - if source.DBType == POSTGRESQL { + var missingTools []string + switch source.DBType { + case POSTGRESQL: sourceDBVersion := source.DB().GetVersion() - for _, binary := range pgExportDependencies { + for _, binary := range pgExportCommands { _, binaryCheckIssue, err := srcdb.GetAbsPathOfPGCommandAboveVersion(binary, sourceDBVersion) if err != nil { return nil, err } else if binaryCheckIssue != "" { - binaryCheckIssues = append(binaryCheckIssues, binaryCheckIssue) } } - if len(binaryCheckIssues) > 0 { - binaryCheckIssues = append(binaryCheckIssues, "Install or Add the required dependencies to PATH and try again\n") + + missingTools = utils.CheckTools("strings") + + case MYSQL: + // TODO: For mysql and oracle, we can probably remove the ora2pg check in case it is a live migration + // Issue Link: https://github.com/yugabyte/yb-voyager/issues/2102 + missingTools = utils.CheckTools("ora2pg") + + case ORACLE: + missingTools = utils.CheckTools("ora2pg", "sqlplus") + + case YUGABYTEDB: + missingTools = utils.CheckTools("strings") + + default: + return nil, fmt.Errorf("unknown source database type %q", source.DBType) + } + + binaryCheckIssues = append(binaryCheckIssues, missingTools...) 
+ + if changeStreamingIsEnabled(exportType) || useDebezium { + // Check for java + javaIssue, err := checkJavaVersion() + if err != nil { + return nil, err + } + if javaIssue != "" { + binaryCheckIssues = append(binaryCheckIssues, javaIssue) } } + if len(binaryCheckIssues) > 0 { + binaryCheckIssues = append(binaryCheckIssues, "Install or Add the required dependencies to PATH and try again") + } + if changeStreamingIsEnabled(exportType) || useDebezium { // Check for debezium // FindDebeziumDistribution returns an error only if the debezium distribution is not found // So its error mesage will be added to problems - err := dbzm.FindDebeziumDistribution(source.DBType, false) + err = dbzm.FindDebeziumDistribution(source.DBType, false) if err != nil { + if len(binaryCheckIssues) > 0 { + binaryCheckIssues = append(binaryCheckIssues, "") + } binaryCheckIssues = append(binaryCheckIssues, strings.ToUpper(err.Error()[:1])+err.Error()[1:]) binaryCheckIssues = append(binaryCheckIssues, "Please check your Voyager installation and try again") } @@ -417,3 +455,62 @@ func checkDependenciesForExport() (binaryCheckIssues []string, err error) { return binaryCheckIssues, nil } + +func checkJavaVersion() (binaryCheckIssue string, err error) { + javaBinary := "java" + if javaHome := os.Getenv("JAVA_HOME"); javaHome != "" { + javaBinary = javaHome + "/bin/java" + } + + // Execute `java -version` to get the version + cmd := exec.Command(javaBinary, "-version") + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Sprintf("java: required version >= %d", MIN_REQUIRED_JAVA_VERSION), nil + } + + // Example output + // java version "11.0.16" 2022-07-19 LTS + // Java(TM) SE Runtime Environment (build 11.0.16+8-LTS-211) + // Java HotSpot(TM) 64-Bit Server VM (build 11.0.16+8-LTS-211, mixed mode, sharing) + + // Convert output to string + versionOutput := string(output) + + // Extract the line with the version + var versionLine string + lines := strings.Split(versionOutput, "\n") + 
for _, line := range lines { + if strings.Contains(line, "version") { + versionLine = line + break + } + } + if versionLine == "" { + return "", fmt.Errorf("unable to find java version in output: %s", versionOutput) + } + + // Extract version string from the line (mimics awk -F '"' '/version/ {print $2}') + startIndex := strings.Index(versionLine, "\"") + endIndex := strings.LastIndex(versionLine, "\"") + if startIndex == -1 || endIndex == -1 || startIndex >= endIndex { + return "", fmt.Errorf("unexpected java version output: %s", versionOutput) + } + version := versionLine[startIndex+1 : endIndex] + + // Extract major version + versionNumbers := strings.Split(version, ".") + if len(versionNumbers) < 1 { + return "", fmt.Errorf("unexpected java version output: %s", versionOutput) + } + majorVersion, err := strconv.Atoi(versionNumbers[0]) + if err != nil { + return "", fmt.Errorf("unexpected java version output: %s", versionOutput) + } + + if majorVersion < MIN_REQUIRED_JAVA_VERSION { + return fmt.Sprintf("java: required version >= %d; current version: %s", MIN_REQUIRED_JAVA_VERSION, version), nil + } + + return "", nil +} diff --git a/yb-voyager/cmd/exportData.go b/yb-voyager/cmd/exportData.go index ed50cd63ff..bd9a37f900 100644 --- a/yb-voyager/cmd/exportData.go +++ b/yb-voyager/cmd/exportData.go @@ -112,6 +112,11 @@ func exportDataCommandPreRun(cmd *cobra.Command, args []string) { func exportDataCommandFn(cmd *cobra.Command, args []string) { CreateMigrationProjectIfNotExists(source.DBType, exportDir) + err := retrieveMigrationUUID() + if err != nil { + utils.ErrExit("failed to get migration UUID: %w", err) + } + ExitIfAlreadyCutover(exporterRole) if useDebezium && !changeStreamingIsEnabled(exportType) { utils.PrintAndLog("Note: Beta feature to accelerate data export is enabled by setting BETA_FAST_DATA_EXPORT environment variable") @@ -122,14 +127,9 @@ func exportDataCommandFn(cmd *cobra.Command, args []string) { utils.PrintAndLog("export of data for source type 
as '%s'", source.DBType) sqlname.SourceDBType = source.DBType - err := retrieveMigrationUUID() - if err != nil { - utils.ErrExit("failed to get migration UUID: %w", err) - } - success := exportData() if success { - sendPayloadAsPerExporterRole(COMPLETE) + sendPayloadAsPerExporterRole(COMPLETE, "") setDataIsExported() color.Green("Export of data complete") @@ -140,24 +140,24 @@ func exportDataCommandFn(cmd *cobra.Command, args []string) { } else { color.Red("Export of data failed! Check %s/logs for more details.", exportDir) log.Error("Export of data failed.") - sendPayloadAsPerExporterRole(ERROR) + sendPayloadAsPerExporterRole(ERROR, "") atexit.Exit(1) } } -func sendPayloadAsPerExporterRole(status string) { +func sendPayloadAsPerExporterRole(status string, errorMsg string) { if !callhome.SendDiagnostics { return } switch exporterRole { case SOURCE_DB_EXPORTER_ROLE: - packAndSendExportDataPayload(status) + packAndSendExportDataPayload(status, errorMsg) case TARGET_DB_EXPORTER_FB_ROLE, TARGET_DB_EXPORTER_FF_ROLE: - packAndSendExportDataFromTargetPayload(status) + packAndSendExportDataFromTargetPayload(status, errorMsg) } } -func packAndSendExportDataPayload(status string) { +func packAndSendExportDataPayload(status string, errorMsg string) { if !shouldSendCallhome() { return @@ -182,6 +182,7 @@ func packAndSendExportDataPayload(status string) { exportDataPayload := callhome.ExportDataPhasePayload{ ParallelJobs: int64(source.NumConnections), StartClean: bool(startClean), + Error: callhome.SanitizeErrorMsg(errorMsg), } updateExportSnapshotDataStatsInPayload(&exportDataPayload) @@ -222,8 +223,8 @@ func exportData() bool { if err != nil { utils.ErrExit("check dependencies for export: %v", err) } else if len(binaryCheckIssues) > 0 { - color.Red("\nMissing dependencies for export data:") - utils.PrintAndLog("\n%s", strings.Join(binaryCheckIssues, "\n")) + headerStmt := color.RedString("Missing dependencies for export data:") + utils.PrintAndLog("\n%s\n%s", headerStmt, 
strings.Join(binaryCheckIssues, "\n")) utils.ErrExit("") } } @@ -236,17 +237,15 @@ func exportData() bool { res := source.DB().CheckSchemaExists() if !res { - utils.ErrExit("schema %q does not exist", source.Schema) + utils.ErrExit("schema does not exist : %q", source.Schema) } - // Check if source DB has required permissions for export data if source.RunGuardrailsChecks { - checkExportDataPermissions() + checkIfSchemasHaveUsagePermissions() } clearMigrationStateIfRequired() checkSourceDBCharset() - source.DB().CheckRequiredToolsAreInstalled() saveSourceDBConfInMSR() saveExportTypeInMSR() err = InitNameRegistry(exportDir, exporterRole, &source, source.DB(), nil, nil, false) @@ -257,7 +256,16 @@ func exportData() bool { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var partitionsToRootTableMap map[string]string - partitionsToRootTableMap, finalTableList, tablesColumnList := getFinalTableColumnList() + // get initial table list + partitionsToRootTableMap, finalTableList := getInitialTableList() + + // Check if source DB has required permissions for export data + if source.RunGuardrailsChecks { + checkExportDataPermissions(finalTableList) + } + + // finalize table list and column list + finalTableList, tablesColumnList := finalizeTableColumnList(finalTableList) if len(finalTableList) == 0 { utils.PrintAndLog("no tables present to export, exiting...") @@ -293,7 +301,7 @@ func exportData() bool { //Fine to lookup directly as this will root table in case of partitions tuple, err := namereg.NameReg.LookupTableName(renamedTable) if err != nil { - utils.ErrExit("lookup table name %s: %v", renamedTable, err) + utils.ErrExit("lookup table name: %s: %v", renamedTable, err) } currPartitions, ok := leafPartitions.Get(tuple) if !ok { @@ -340,10 +348,6 @@ func exportData() bool { // 2. export snapshot corresponding to replication slot by passing it to pg_dump // 3. 
start debezium with configration to read changes from the created replication slot, publication. - err := source.DB().ValidateTablesReadyForLiveMigration(finalTableList) - if err != nil { - utils.ErrExit("error: validate if tables are ready for live migration: %v", err) - } if !dataIsExported() { // if snapshot is not already done... err = exportPGSnapshotWithPGdump(ctx, cancel, finalTableList, tablesColumnList, leafPartitions) if err != nil { @@ -437,7 +441,7 @@ func exportData() bool { } } -func checkExportDataPermissions() { +func checkExportDataPermissions(finalTableList []sqlname.NameTuple) { // If source is PostgreSQL or YB, check if the number of existing replicaton slots is less than the max allowed if (source.DBType == POSTGRESQL && changeStreamingIsEnabled(exportType)) || (source.DBType == YUGABYTEDB && !bool(useYBgRPCConnector)) { @@ -455,7 +459,7 @@ func checkExportDataPermissions() { } } - missingPermissions, err := source.DB().GetMissingExportDataPermissions(exportType) + missingPermissions, err := source.DB().GetMissingExportDataPermissions(exportType, finalTableList) if err != nil { utils.ErrExit("get missing export data permissions: %v", err) } @@ -483,6 +487,24 @@ func checkExportDataPermissions() { } } +func checkIfSchemasHaveUsagePermissions() { + schemasMissingUsage, err := source.DB().GetSchemasMissingUsagePermissions() + if err != nil { + utils.ErrExit("get schemas missing usage permissions: %v", err) + } + if len(schemasMissingUsage) > 0 { + utils.PrintAndLog("\n%s[%s]", color.RedString(fmt.Sprintf("Missing USAGE permission for user %s on Schemas: ", source.User)), strings.Join(schemasMissingUsage, ", ")) + + var link string + if changeStreamingIsEnabled(exportType) { + link = "https://docs.yugabyte.com/preview/yugabyte-voyager/migrate/live-migrate/#prepare-the-source-database" + } else { + link = "https://docs.yugabyte.com/preview/yugabyte-voyager/migrate/migrate-steps/#prepare-the-source-database" + } + utils.ErrExit("\nCheck the 
documentation to prepare the database for migration: %s", color.BlueString(link)) + } +} + func updateCallhomeExportPhase() { if !callhome.SendDiagnostics { return @@ -713,10 +735,10 @@ func reportUnsupportedTables(finalTableList []sqlname.NameTuple) { } } -func getFinalTableColumnList() (map[string]string, []sqlname.NameTuple, *utils.StructMap[sqlname.NameTuple, []string]) { +func getInitialTableList() (map[string]string, []sqlname.NameTuple) { var tableList []sqlname.NameTuple // store table list after filtering unsupported or unnecessary tables - var finalTableList, skippedTableList []sqlname.NameTuple + var finalTableList []sqlname.NameTuple tableListFromDB := source.DB().GetAllTableNames() var err error var fullTableList []sqlname.NameTuple @@ -737,7 +759,7 @@ func getFinalTableColumnList() (map[string]string, []sqlname.NameTuple, *utils.S if parent == "" { tuple, err = namereg.NameReg.LookupTableName(fmt.Sprintf("%s.%s", schema, table)) if err != nil { - utils.ErrExit("lookup for table name %s failed err: %v", table, err) + utils.ErrExit("lookup for table name failed err: %s: %v", table, err) } } fullTableList = append(fullTableList, tuple) @@ -766,11 +788,23 @@ func getFinalTableColumnList() (map[string]string, []sqlname.NameTuple, *utils.S } }) } + var partitionsToRootTableMap map[string]string + isTableListSet := source.TableList != "" + partitionsToRootTableMap, finalTableList, err = addLeafPartitionsInTableList(finalTableList, isTableListSet) + if err != nil { + utils.ErrExit("failed to add the leaf partitions in table list: %w", err) + } + + return partitionsToRootTableMap, finalTableList +} + +func finalizeTableColumnList(finalTableList []sqlname.NameTuple) ([]sqlname.NameTuple, *utils.StructMap[sqlname.NameTuple, []string]) { if changeStreamingIsEnabled(exportType) { reportUnsupportedTables(finalTableList) } - log.Infof("initial all tables table list for data export: %v", tableList) + log.Infof("initial all tables table list for data export: %v", 
finalTableList) + var skippedTableList []sqlname.NameTuple if !changeStreamingIsEnabled(exportType) { finalTableList, skippedTableList = source.DB().FilterEmptyTables(finalTableList) if len(skippedTableList) != 0 { @@ -787,13 +821,6 @@ func getFinalTableColumnList() (map[string]string, []sqlname.NameTuple, *utils.S })) } - var partitionsToRootTableMap map[string]string - isTableListSet := source.TableList != "" - partitionsToRootTableMap, finalTableList, err = addLeafPartitionsInTableList(finalTableList, isTableListSet) - if err != nil { - utils.ErrExit("failed to add the leaf partitions in table list: %w", err) - } - tablesColumnList, unsupportedTableColumnsMap, err := source.DB().GetColumnsWithSupportedTypes(finalTableList, useDebezium, changeStreamingIsEnabled(exportType)) if err != nil { utils.ErrExit("get columns with supported types: %v", err) @@ -823,7 +850,7 @@ func getFinalTableColumnList() (map[string]string, []sqlname.NameTuple, *utils.S finalTableList = filterTableWithEmptySupportedColumnList(finalTableList, tablesColumnList) } - return partitionsToRootTableMap, finalTableList, tablesColumnList + return finalTableList, tablesColumnList } func exportDataOffline(ctx context.Context, cancel context.CancelFunc, finalTableList []sqlname.NameTuple, tablesColumnList *utils.StructMap[sqlname.NameTuple, []string], snapshotName string) error { @@ -841,8 +868,8 @@ func exportDataOffline(ctx context.Context, cancel context.CancelFunc, finalTabl log.Infoln("Cancel() being called, within exportDataOffline()") cancel() //will cancel/stop both dump tool and progress bar time.Sleep(time.Second * 5) //give sometime for the cancel to complete before this function returns - utils.ErrExit("yb-voyager encountered internal error. 
"+ - "Check %s/logs/yb-voyager-export-data.log for more details.", exportDir) + utils.ErrExit("yb-voyager encountered internal error: "+ + "Check: %s/logs/yb-voyager-export-data.log for more details.", exportDir) } }() @@ -869,7 +896,7 @@ func exportDataOffline(ctx context.Context, cancel context.CancelFunc, finalTabl for _, seq := range sequenceList { seqTuple, err := namereg.NameReg.LookupTableName(seq) if err != nil { - utils.ErrExit("lookup for sequence %s failed err: %v", seq, err) + utils.ErrExit("lookup for sequence failed: %s: err: %v", seq, err) } finalTableList = append(finalTableList, seqTuple) } @@ -900,11 +927,6 @@ func exportDataOffline(ctx context.Context, cancel context.CancelFunc, finalTabl if source.DBType == POSTGRESQL { //Make leaf partitions data files entry under the name of root table renameDatafileDescriptor(exportDir) - //Similarly for the export snapshot status file - err = renameExportSnapshotStatus(exportSnapshotStatusFile) - if err != nil { - return fmt.Errorf("rename export snapshot status: %w", err) - } } displayExportedRowCountSnapshot(false) @@ -997,7 +1019,7 @@ func clearMigrationStateIfRequired() { dbzm.IsMigrationInStreamingMode(exportDir) { utils.PrintAndLog("Continuing streaming from where we left off...") } else { - utils.ErrExit("%s/data directory is not empty, use --start-clean flag to clean the directories and start", exportDir) + utils.ErrExit("data directory is not empty, use --start-clean flag to clean the directories and start: %s", exportDir) } } } @@ -1025,14 +1047,13 @@ func extractTableListFromString(fullTableList []sqlname.NameTuple, flagTableList result := lo.Filter(fullTableList, func(tableName sqlname.NameTuple, _ int) bool { ok, err := tableName.MatchesPattern(pattern) if err != nil { - utils.ErrExit("Invalid table name pattern %q: %s", err) + utils.ErrExit("Invalid table name pattern: %q: %s", pattern, err) } return ok }) return result } tableList := utils.CsvStringToSlice(flagTableList) - var 
unqualifiedTables []string var unknownTableNames []string for _, pattern := range tableList { tables := findPatternMatchingTables(pattern) @@ -1041,12 +1062,9 @@ func extractTableListFromString(fullTableList []sqlname.NameTuple, flagTableList } result = append(result, tables...) } - if len(unqualifiedTables) > 0 { - utils.ErrExit("Qualify following table names %v in the %s list with schema name", unqualifiedTables, listName) - } if len(unknownTableNames) > 0 { utils.PrintAndLog("Unknown table names %v in the %s list", unknownTableNames, listName) - utils.ErrExit("Valid table names are %v", lo.Map(fullTableList, func(tableName sqlname.NameTuple, _ int) string { + utils.ErrExit("Valid table names are: %v", lo.Map(fullTableList, func(tableName sqlname.NameTuple, _ int) string { return tableName.ForOutput() })) } @@ -1125,13 +1143,13 @@ func startFallBackSetupIfRequired() { utils.PrintAndLog("Starting import data to source with command:\n %s", color.GreenString(cmdStr)) binary, lookErr := exec.LookPath(os.Args[0]) if lookErr != nil { - utils.ErrExit("could not find yb-voyager - %w", err) + utils.ErrExit("could not find yb-voyager: %w", err) } env := os.Environ() env = slices.Insert(env, 0, "SOURCE_DB_PASSWORD="+source.Password) execErr := syscall.Exec(binary, cmd, env) if execErr != nil { - utils.ErrExit("failed to run yb-voyager import data to source - %w\n Please re-run with command :\n%s", err, cmdStr) + utils.ErrExit("failed to run yb-voyager import data to source: %w\n Please re-run with command :\n%s", err, cmdStr) } } diff --git a/yb-voyager/cmd/exportDataFromTarget.go b/yb-voyager/cmd/exportDataFromTarget.go index 38c557933a..31fe1a248d 100644 --- a/yb-voyager/cmd/exportDataFromTarget.go +++ b/yb-voyager/cmd/exportDataFromTarget.go @@ -112,7 +112,7 @@ func initSourceConfFromTargetConf() error { return nil } -func packAndSendExportDataFromTargetPayload(status string) { +func packAndSendExportDataFromTargetPayload(status string, errorMsg string) { if 
!shouldSendCallhome() { return } @@ -128,6 +128,7 @@ func packAndSendExportDataFromTargetPayload(status string) { exportDataPayload := callhome.ExportDataPhasePayload{ ParallelJobs: int64(source.NumConnections), StartClean: bool(startClean), + Error: callhome.SanitizeErrorMsg(errorMsg), } exportDataPayload.Phase = exportPhase diff --git a/yb-voyager/cmd/exportDataStatus.go b/yb-voyager/cmd/exportDataStatus.go index 1c2a976a51..c68b102f3e 100644 --- a/yb-voyager/cmd/exportDataStatus.go +++ b/yb-voyager/cmd/exportDataStatus.go @@ -101,6 +101,13 @@ func initializeExportTableMetadata(tableList []sqlname.NameTuple) { Status: utils.TableMetadataStatusMap[tablesProgressMetadata[key].Status], ExportedRowCountSnapshot: int64(0), } + if source.DBType == POSTGRESQL { + //for Postgresql rename the table leaf table names to root table + renamedTable, isRenamed := renameTableIfRequired(key) + if isRenamed { + exportSnapshotStatus.Tables[key].TableName = renamedTable + } + } } err := exportSnapshotStatusFile.Create(exportSnapshotStatus) if err != nil { @@ -196,7 +203,11 @@ func startExportPB(progressContainer *mpb.Progress, mapKey string, quitChan chan // parallel goroutine to calculate and set total to actual row count go func() { - actualRowCount := source.DB().GetTableRowCount(tableMetadata.TableName) + actualRowCount, err := source.DB().GetTableRowCount(tableMetadata.TableName) + if err != nil { + log.Warnf("could not get actual row count for table=%s: %v", tableMetadata.TableName, err) + return + } log.Infof("Replacing actualRowCount=%d inplace of expectedRowCount=%d for table=%s", actualRowCount, tableMetadata.CountTotalRows, tableMetadata.TableName.ForUserQuery()) pbr.SetTotalRowCount(actualRowCount, false) @@ -251,7 +262,7 @@ func startExportPB(progressContainer *mpb.Progress, mapKey string, quitChan chan time.Sleep(100 * time.Millisecond) break } else if err != nil { //error other than EOF - utils.ErrExit("Error while reading file %s: %v", tableDataFile, err) + 
utils.ErrExit("Error while reading file: %s: %v", tableDataFile.Name(), err) } if isDataLine(line, source.DBType, &insideCopyStmt) { tableMetadata.CountLiveRows += 1 diff --git a/yb-voyager/cmd/exportDataStatusCommand.go b/yb-voyager/cmd/exportDataStatusCommand.go index 5f1c579801..3c3c310ff2 100644 --- a/yb-voyager/cmd/exportDataStatusCommand.go +++ b/yb-voyager/cmd/exportDataStatusCommand.go @@ -20,6 +20,7 @@ import ( "fmt" "io/fs" "path/filepath" + "slices" "sort" "strings" @@ -28,6 +29,7 @@ import ( "github.com/spf13/cobra" "github.com/yugabyte/yb-voyager/yb-voyager/src/dbzm" + "github.com/yugabyte/yb-voyager/yb-voyager/src/metadb" "github.com/yugabyte/yb-voyager/yb-voyager/src/namereg" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/jsonfile" @@ -53,7 +55,7 @@ var exportDataStatusCmd = &cobra.Command{ utils.ErrExit("\nNote: Run the following command to get the current report of live migration:\n" + color.CyanString("yb-voyager get data-migration-report --export-dir %q\n", exportDir)) } - err = InitNameRegistry(exportDir, SOURCE_DB_EXPORTER_ROLE, nil, nil, nil, nil, false) + err = InitNameRegistry(exportDir, namereg.SOURCE_DB_EXPORTER_STATUS_ROLE, nil, nil, nil, nil, false) if err != nil { utils.ErrExit("initializing name registry: %v", err) } @@ -61,12 +63,24 @@ var exportDataStatusCmd = &cobra.Command{ if err != nil { utils.ErrExit("Failed to get migration status record: %s", err) } + if msr == nil { + color.Cyan(exportDataStatusMsg) + return + } useDebezium = msr.IsSnapshotExportedViaDebezium() + + if msr.SourceDBConf == nil { + utils.ErrExit("export data has not started yet. 
Try running after export has started") + } + source = *msr.SourceDBConf + sqlname.SourceDBType = source.DBType + leafPartitions := getLeafPartitionsFromRootTable() + var rows []*exportTableMigStatusOutputRow if useDebezium { - rows, err = runExportDataStatusCmdDbzm(streamChanges) + rows, err = runExportDataStatusCmdDbzm(streamChanges, leafPartitions, msr) } else { - rows, err = runExportDataStatusCmd() + rows, err = runExportDataStatusCmd(msr, leafPartitions) } if err != nil { utils.ErrExit("error: %s\n", err) @@ -77,7 +91,7 @@ var exportDataStatusCmd = &cobra.Command{ reportFile := jsonfile.NewJsonFile[[]*exportTableMigStatusOutputRow](reportFilePath) err := reportFile.Create(&rows) if err != nil { - utils.ErrExit("creating into json file %s: %v", reportFilePath, err) + utils.ErrExit("creating into json file: %s: %v", reportFilePath, err) } fmt.Print(color.GreenString("Export data status report is written to %s\n", reportFilePath)) return @@ -105,33 +119,35 @@ var InProgressTableSno int // Note that the `export data status` is running in a separate process. It won't have access to the in-memory state // held in the main `export data` process. -func runExportDataStatusCmdDbzm(streamChanges bool) ([]*exportTableMigStatusOutputRow, error) { +func runExportDataStatusCmdDbzm(streamChanges bool, leafPartitions map[string][]string, msr *metadb.MigrationStatusRecord) ([]*exportTableMigStatusOutputRow, error) { exportStatusFilePath := filepath.Join(exportDir, "data", "export_status.json") status, err := dbzm.ReadExportStatus(exportStatusFilePath) if err != nil { - utils.ErrExit("Failed to read export status file %s: %v", exportStatusFilePath, err) + utils.ErrExit("Failed to read export status file: %s: %v", exportStatusFilePath, err) } if status == nil { - return nil, nil + return nil, fmt.Errorf("export data has not started yet. 
Try running after export has started") } InProgressTableSno = status.InProgressTableSno() var rows []*exportTableMigStatusOutputRow var row *exportTableMigStatusOutputRow for _, tableStatus := range status.Tables { - row = getSnapshotExportStatusRow(&tableStatus) + row = getSnapshotExportStatusRow(&tableStatus, leafPartitions, msr) rows = append(rows, row) } return rows, nil } -func getSnapshotExportStatusRow(tableStatus *dbzm.TableExportStatus) *exportTableMigStatusOutputRow { +func getSnapshotExportStatusRow(tableStatus *dbzm.TableExportStatus, leafPartitions map[string][]string, msr *metadb.MigrationStatusRecord) *exportTableMigStatusOutputRow { nt, err := namereg.NameReg.LookupTableName(fmt.Sprintf("%s.%s", tableStatus.SchemaName, tableStatus.TableName)) if err != nil { - utils.ErrExit("lookup %s in name registry: %v", tableStatus.TableName, err) + utils.ErrExit("lookup in name registry: %s: %v", tableStatus.TableName, err) } + //Using the ForOutput() as a key for leafPartitions map as we are populating the map in that way. 
+	displayTableName := getDisplayName(nt, leafPartitions[nt.ForOutput()], msr.IsExportTableListSet) 	row := &exportTableMigStatusOutputRow{ -		TableName:     nt.ForMinOutput(), +		TableName:     displayTableName, 		Status:        "DONE", 		ExportedCount: tableStatus.ExportedRowCountSnapshot, 	} @@ -145,23 +161,29 @@ func getSnapshotExportStatusRow(tableStatus *dbzm.TableExportStatus) *exportTabl 	return row } -func runExportDataStatusCmd() ([]*exportTableMigStatusOutputRow, error) { -	msr, err := metaDB.GetMigrationStatusRecord() -	if err != nil { -		return nil, fmt.Errorf("error while getting migration status record: %v", err) +func getDisplayName(nt sqlname.NameTuple, partitions []string, isTableListSet bool) string { +	displayTableName := nt.ForMinOutput() +	//Changing the display of the partition tables in case table-list is set because there can be a case where the user has passed a subset of leaf tables in the list +	if source.DBType == POSTGRESQL && partitions != nil && isTableListSet { +		slices.Sort(partitions) +		partitions := strings.Join(partitions, ", ") +		displayTableName = fmt.Sprintf("%s (%s)", displayTableName, partitions) 	} + +	return displayTableName +} + +func runExportDataStatusCmd(msr *metadb.MigrationStatusRecord, leafPartitions map[string][]string) ([]*exportTableMigStatusOutputRow, error) { 	tableList := msr.TableListExportedFromSource -	source = *msr.SourceDBConf -	sqlname.SourceDBType = source.DBType 	var outputRows []*exportTableMigStatusOutputRow 	exportSnapshotStatusFilePath := filepath.Join(exportDir, "metainfo", "export_snapshot_status.json") 	exportSnapshotStatusFile = jsonfile.NewJsonFile[ExportSnapshotStatus](exportSnapshotStatusFilePath) 	exportStatusSnapshot, err := exportSnapshotStatusFile.Read() 	if err != nil { 		if errors.Is(err, fs.ErrNotExist) { -			return nil, nil +			return nil, fmt.Errorf("export data has not started yet. 
Try running after export has started") } - utils.ErrExit("Failed to read export status file %s: %v", exportSnapshotStatusFilePath, err) + utils.ErrExit("Failed to read export status file: %s: %v", exportSnapshotStatusFilePath, err) } exportedSnapshotRow, exportedSnapshotStatus, err := getExportedSnapshotRowsMap(exportStatusSnapshot) @@ -169,20 +191,17 @@ func runExportDataStatusCmd() ([]*exportTableMigStatusOutputRow, error) { return nil, fmt.Errorf("error while getting exported snapshot rows map: %v", err) } - leafPartitions := getLeafPartitionsFromRootTable() - for _, tableName := range tableList { finalFullTableName, err := namereg.NameReg.LookupTableName(tableName) if err != nil { return nil, fmt.Errorf("lookup %s in name registry: %v", tableName, err) } - displayTableName := finalFullTableName.ForMinOutput() - partitions := leafPartitions[finalFullTableName.ForOutput()] - if source.DBType == POSTGRESQL && partitions != nil { - partitions := strings.Join(partitions, ", ") - displayTableName = fmt.Sprintf("%s (%s)", displayTableName, partitions) + //Using the ForOutput() as a key for leafPartitions map as we are populating the map in that way. 
+		displayTableName := getDisplayName(finalFullTableName, leafPartitions[finalFullTableName.ForOutput()], msr.IsExportTableListSet) +		snapshotStatus, ok := exportedSnapshotStatus.Get(finalFullTableName) +		if !ok { +			return nil, fmt.Errorf("snapshot status for table %s is not populated in %q file", finalFullTableName.ForMinOutput(), exportSnapshotStatusFilePath) 		} -		snapshotStatus, _ := exportedSnapshotStatus.Get(finalFullTableName) 		finalStatus := snapshotStatus[0] 		if len(snapshotStatus) > 1 { // status for root partition wrt leaf partitions 			exportingLeaf := 0 @@ -199,13 +218,18 @@ 			} 			if exportingLeaf > 0 { 				finalStatus = "EXPORTING" -			} else if doneLeaf == len(snapshotStatus) { -				finalStatus = "DONE" 			} else if not_started == len(snapshotStatus) { -				finalStatus = "NOT_STARTED" +				//In case of partition tables in PG, we combine the statuses of all leaf partitions and report a single status for the root table +				//The root table is NOT-STARTED only if every leaf partition is NOT-STARTED; otherwise it is EXPORTING or DONE +				finalStatus = "NOT-STARTED" +			} else { +				finalStatus = "DONE" 			} 		} -		exportedCount, _ := exportedSnapshotRow.Get(finalFullTableName) +		exportedCount, ok := exportedSnapshotRow.Get(finalFullTableName) +		if !ok { +			return nil, fmt.Errorf("snapshot row count for table %s is not populated in %q file", finalFullTableName.ForMinOutput(), exportSnapshotStatusFilePath) +		} 		row := &exportTableMigStatusOutputRow{ 			TableName: displayTableName, 			Status:    finalStatus, diff --git a/yb-voyager/cmd/exportDataStatus_test.go b/yb-voyager/cmd/exportDataStatus_test.go new file mode 100644 index 0000000000..20142dba4a --- /dev/null +++ b/yb-voyager/cmd/exportDataStatus_test.go @@ -0,0 +1,112 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package cmd + +import ( + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +func TestExportSnapshotStatusStructs(t *testing.T) { + + test := []struct { + name string + actualType reflect.Type + expectedType interface{} + }{ + { + name: "Validate TableExportStatus Struct Definition", + actualType: reflect.TypeOf(TableExportStatus{}), + expectedType: struct { + TableName string `json:"table_name"` + FileName string `json:"file_name"` + Status string `json:"status"` + ExportedRowCountSnapshot int64 `json:"exported_row_count_snapshot"` + }{}, + }, + { + name: "Validate ExportSnapshotStatus Struct Definition", + actualType: reflect.TypeOf(ExportSnapshotStatus{}), + expectedType: struct { + Tables map[string]*TableExportStatus `json:"tables"` + }{}, + }, + } + + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + testutils.CompareStructs(t, tt.actualType, reflect.TypeOf(tt.expectedType), tt.name) + }) + } +} + +func TestExportSnapshotStatusJson(t *testing.T) { + // Create a table list of type []sqlname.NameTuple + o1 := sqlname.NewObjectName(POSTGRESQL, "public", "public", "table1") + o2 := sqlname.NewObjectName(POSTGRESQL, "public", "schema1", "table2") + tableList := []sqlname.NameTuple{ + {CurrentName: o1, SourceName: o1, TargetName: o1}, + {CurrentName: o2, SourceName: o2, TargetName: o2}, + } + + exportDir = filepath.Join(os.TempDir(), "export_snapshot_status_test") + + // Make export directory + err := 
os.MkdirAll(filepath.Join(exportDir, "metainfo"), 0755) + if err != nil { + t.Fatalf("failed to create export directory: %v", err) + } + + // Clean up the export directory + defer func() { + err := os.RemoveAll(exportDir) + if err != nil { + t.Fatalf("failed to remove export directory: %v", err) + } + }() + + outputFilePath := filepath.Join(exportDir, "metainfo", "export_snapshot_status.json") + + // Call initializeExportTableMetadata to create the export_snapshot_status.json file + initializeExportTableMetadata(tableList) + + expectedExportSnapshotStatusJSON := `{ + "tables": { + "public.\"table1\"": { + "table_name": "public.\"table1\"", + "file_name": "", + "status": "NOT-STARTED", + "exported_row_count_snapshot": 0 + }, + "schema1.\"table2\"": { + "table_name": "schema1.\"table2\"", + "file_name": "", + "status": "NOT-STARTED", + "exported_row_count_snapshot": 0 + } + } +}` + + // Compare the JSON representation of the sample ExportSnapshotStatus instance + testutils.CompareJson(t, outputFilePath, expectedExportSnapshotStatusJSON, exportDir) +} diff --git a/yb-voyager/cmd/exportSchema.go b/yb-voyager/cmd/exportSchema.go index c594f7b997..4c8f82d720 100644 --- a/yb-voyager/cmd/exportSchema.go +++ b/yb-voyager/cmd/exportSchema.go @@ -22,7 +22,7 @@ import ( "strings" "github.com/fatih/color" - pg_query "github.com/pganalyze/pg_query_go/v5" + pg_query "github.com/pganalyze/pg_query_go/v6" "github.com/samber/lo" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -90,10 +90,15 @@ func exportSchema() error { utils.PrintAndLog("Schema is not exported yet. Ignoring --start-clean flag.\n\n") } CreateMigrationProjectIfNotExists(source.DBType, exportDir) + err := retrieveMigrationUUID() + if err != nil { + log.Errorf("failed to get migration UUID: %v", err) + return fmt.Errorf("failed to get migration UUID: %w", err) + } utils.PrintAndLog("export of schema for source type as '%s'\n", source.DBType) // Check connection with source database. 
- err := source.DB().Connect() + err = source.DB().Connect() if err != nil { log.Errorf("failed to connect to the source db: %s", err) return fmt.Errorf("failed to connect to the source db: %w", err) @@ -118,7 +123,6 @@ func exportSchema() error { } checkSourceDBCharset() - source.DB().CheckRequiredToolsAreInstalled() sourceDBVersion := source.DB().GetVersion() source.DBVersion = sourceDBVersion source.DBSize, err = source.DB().GetDatabaseSize() @@ -134,7 +138,8 @@ func exportSchema() error { // Check if the source database has the required permissions for exporting schema. if source.RunGuardrailsChecks { - missingPerms, err := source.DB().GetMissingExportSchemaPermissions() + checkIfSchemasHaveUsagePermissions() + missingPerms, err := source.DB().GetMissingExportSchemaPermissions("") if err != nil { return fmt.Errorf("failed to get missing migration permissions: %w", err) } @@ -153,12 +158,6 @@ func exportSchema() error { } } - err = retrieveMigrationUUID() - if err != nil { - log.Errorf("failed to get migration UUID: %v", err) - return fmt.Errorf("failed to get migration UUID: %w", err) - } - exportSchemaStartEvent := createExportSchemaStartedEvent() controlPlane.ExportSchemaStarted(&exportSchemaStartEvent) @@ -176,7 +175,7 @@ func exportSchema() error { utils.PrintAndLog("\nExported schema files created under directory: %s\n\n", filepath.Join(exportDir, "schema")) - packAndSendExportSchemaPayload(COMPLETE) + packAndSendExportSchemaPayload(COMPLETE, "") saveSourceDBConfInMSR() setSchemaIsExported() @@ -186,7 +185,7 @@ func exportSchema() error { return nil } -func packAndSendExportSchemaPayload(status string) { +func packAndSendExportSchemaPayload(status string, errorMsg string) { if !shouldSendCallhome() { return } @@ -204,6 +203,7 @@ func packAndSendExportSchemaPayload(status string) { AppliedRecommendations: assessmentRecommendationsApplied, UseOrafce: bool(source.UseOrafce), CommentsOnObjects: bool(source.CommentsOnObjects), + Error: 
callhome.SanitizeErrorMsg(errorMsg), } payload.PhasePayload = callhome.MarshalledJsonString(exportSchemaPayload) @@ -468,8 +468,7 @@ func applyShardingRecommendationIfMatching(sqlInfo *sqlInfo, shardedTables []str } // true -> oracle, false -> PG - parsedObjectName := lo.Ternary(relation.Schemaname == "", relation.Relname, - relation.Schemaname+"."+relation.Relname) + parsedObjectName := utils.BuildObjectName(relation.Schemaname, relation.Relname) match := false switch source.DBType { diff --git a/yb-voyager/cmd/exportSchema_test.go b/yb-voyager/cmd/exportSchema_test.go index 432f763330..a399b0e70a 100644 --- a/yb-voyager/cmd/exportSchema_test.go +++ b/yb-voyager/cmd/exportSchema_test.go @@ -1,3 +1,5 @@ +//go:build unit + /* Copyright (c) YugabyteDB, Inc. @@ -17,9 +19,10 @@ limitations under the License. package cmd import ( - "github.com/stretchr/testify/assert" "strings" "testing" + + "github.com/stretchr/testify/assert" ) func TestShardingRecommendations(t *testing.T) { diff --git a/yb-voyager/cmd/getDataMigrationReportCommand.go b/yb-voyager/cmd/getDataMigrationReportCommand.go index 688ee8e68a..f37a5797cd 100644 --- a/yb-voyager/cmd/getDataMigrationReportCommand.go +++ b/yb-voyager/cmd/getDataMigrationReportCommand.go @@ -16,7 +16,9 @@ limitations under the License. package cmd import ( + "errors" "fmt" + "io/fs" "path/filepath" "github.com/fatih/color" @@ -135,7 +137,10 @@ func getDataMigrationReportCmdFn(msr *metadb.MigrationStatusRecord) { exportStatusFilePath := filepath.Join(exportDir, "data", "export_status.json") dbzmStatus, err := dbzm.ReadExportStatus(exportStatusFilePath) if err != nil { - utils.ErrExit("Failed to read export status file %s: %v", exportStatusFilePath, err) + utils.ErrExit("Failed to read export status file: %s: %v", exportStatusFilePath, err) + } + if dbzmStatus == nil { + utils.ErrExit("Export data has not started yet. 
Try running after export has started.") } dbzmNameTupToRowCount := utils.NewStructMap[sqlname.NameTuple, int64]() @@ -150,7 +155,10 @@ func getDataMigrationReportCmdFn(msr *metadb.MigrationStatusRecord) { if source.DBType == POSTGRESQL { exportSnapshotStatus, err = exportSnapshotStatusFile.Read() if err != nil { - utils.ErrExit("Failed to read export status file %s: %v", exportSnapshotStatusFilePath, err) + if errors.Is(err, fs.ErrNotExist) { + utils.ErrExit("Export data has not started yet. Try running after export has started.") + } + utils.ErrExit("Failed to read export status file: %s: %v", exportSnapshotStatusFilePath, err) } exportedPGSnapshotRowsMap, _, err = getExportedSnapshotRowsMap(exportSnapshotStatus) if err != nil { @@ -161,7 +169,7 @@ func getDataMigrationReportCmdFn(msr *metadb.MigrationStatusRecord) { tableName := fmt.Sprintf("%s.%s", tableExportStatus.SchemaName, tableExportStatus.TableName) nt, err := namereg.NameReg.LookupTableName(tableName) if err != nil { - utils.ErrExit("lookup %s in name registry: %v", tableName, err) + utils.ErrExit("lookup in name registry: %s: %v", tableName, err) } dbzmNameTupToRowCount.Put(nt, tableExportStatus.ExportedRowCountSnapshot) } @@ -298,7 +306,7 @@ func getDataMigrationReportCmdFn(msr *metadb.MigrationStatusRecord) { reportFile := jsonfile.NewJsonFile[[]*rowData](reportFilePath) err := reportFile.Create(&reportData) if err != nil { - utils.ErrExit("creating into json file %s: %v", reportFilePath, err) + utils.ErrExit("creating into json file: %s: %v", reportFilePath, err) } fmt.Print(color.GreenString("Data migration report is written to %s\n", reportFilePath)) return diff --git a/yb-voyager/cmd/import.go b/yb-voyager/cmd/import.go index b3663da9aa..f141545d9f 100644 --- a/yb-voyager/cmd/import.go +++ b/yb-voyager/cmd/import.go @@ -91,6 +91,7 @@ func validateImportFlags(cmd *cobra.Command, importerRole string) error { getSourceDBPassword(cmd) } validateParallelismFlags() + validateTruncateTablesFlag() return 
nil } @@ -232,13 +233,13 @@ func registerImportDataCommonFlags(cmd *cobra.Command) { cmd.Flags().MarkHidden("truncate-splits") } -func registerImportDataFlags(cmd *cobra.Command) { +func registerImportDataToTargetFlags(cmd *cobra.Command) { BoolVar(cmd.Flags(), &startClean, "start-clean", false, `Starts a fresh import with exported data files present in the export-dir/data directory. If any table on YugabyteDB database is non-empty, it prompts whether you want to continue the import without truncating those tables; If you go ahead without truncating, then yb-voyager starts ingesting the data present in the data files with upsert mode. Note that for the cases where a table doesn't have a primary key, this may lead to insertion of duplicate data. To avoid this, exclude the table using the --exclude-file-list or truncate those tables manually before using the start-clean flag (default false)`) - + BoolVar(cmd.Flags(), &truncateTables, "truncate-tables", false, "Truncate tables on target YugabyteDB before importing data. Only applicable along with --start-clean true (default false)") } func registerImportSchemaFlags(cmd *cobra.Command) { @@ -410,3 +411,9 @@ func validateParallelismFlags() { } } + +func validateTruncateTablesFlag() { + if truncateTables && !startClean { + utils.ErrExit("Error: --truncate-tables true can only be specified along with --start-clean true") + } +} diff --git a/yb-voyager/cmd/importData.go b/yb-voyager/cmd/importData.go index a69a543610..29ce170ec7 100644 --- a/yb-voyager/cmd/importData.go +++ b/yb-voyager/cmd/importData.go @@ -16,7 +16,6 @@ limitations under the License. 
package cmd import ( - "context" "fmt" "io" "os" @@ -29,7 +28,6 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/fatih/color" - "github.com/jackc/pgx/v4" "github.com/samber/lo" log "github.com/sirupsen/logrus" "github.com/sourcegraph/conc/pool" @@ -82,8 +80,12 @@ var importDataCmd = &cobra.Command{ if importerRole == "" { importerRole = TARGET_DB_IMPORTER_ROLE } + err := retrieveMigrationUUID() + if err != nil { + utils.ErrExit("failed to get migration UUID: %w", err) + } sourceDBType = GetSourceDBTypeFromMSR() - err := validateImportFlags(cmd, importerRole) + err = validateImportFlags(cmd, importerRole) if err != nil { utils.ErrExit("Error: %s", err.Error()) } @@ -217,11 +219,12 @@ func checkImportDataPermissions() { utils.PrintAndLog(output) var link string - if importerRole == SOURCE_REPLICA_DB_IMPORTER_ROLE { + switch importerRole { + case SOURCE_REPLICA_DB_IMPORTER_ROLE: link = "https://docs.yugabyte.com/preview/yugabyte-voyager/migrate/live-fall-forward/#prepare-source-replica-database" - } else if importerRole == SOURCE_DB_IMPORTER_ROLE { + case SOURCE_DB_IMPORTER_ROLE: link = "https://docs.yugabyte.com/preview/yugabyte-voyager/migrate/live-fall-back/#prepare-the-source-database" - } else { + default: if changeStreamingIsEnabled(importType) { link = "https://docs.yugabyte.com/preview/yugabyte-voyager/migrate/live-migrate/#prepare-the-target-database" } else { @@ -231,15 +234,18 @@ func checkImportDataPermissions() { fmt.Println("\nCheck the documentation to prepare the database for migration:", color.BlueString(link)) // Prompt user to continue if missing permissions only if fk and triggers check did not fail - if !fkAndTriggersCheckFailed { - if !utils.AskPrompt("\nDo you want to continue anyway") { - utils.ErrExit("Please grant the required permissions and retry the import.") - } - } else { + if fkAndTriggersCheckFailed { + utils.ErrExit("Please grant the required permissions and retry the import.") + } else if !utils.AskPrompt("\nDo you want to 
continue anyway") { utils.ErrExit("Please grant the required permissions and retry the import.") } } else { - log.Info("The target database has the required permissions for importing data.") + // If only fk and triggers check failed just simply error out + if fkAndTriggersCheckFailed { + utils.ErrExit("") + } else { + log.Info("The target database has the required permissions for importing data.") + } } } @@ -299,7 +305,7 @@ func startExportDataFromTargetIfRequired() { execErr := syscall.Exec(binary, cmd, env) if execErr != nil { - utils.ErrExit("failed to run yb-voyager export data from target - %w\n Please re-run with command :\n%s", execErr, cmdStr) + utils.ErrExit("failed to run yb-voyager export data from target: %w\n Please re-run with command :\n%s", execErr, cmdStr) } } @@ -412,7 +418,7 @@ func applyTableListFilter(importFileTasks []*ImportFileTask) []*ImportFileTask { } } if len(unqualifiedTables) > 0 { - utils.ErrExit("Qualify following table names %v in the %s list with schema-name.", unqualifiedTables, listName) + utils.ErrExit("Qualify following table names in the %s list with schema-name: %v", listName, unqualifiedTables) } log.Infof("%s tableList: %v", listName, result) return result, unknownTables @@ -468,10 +474,6 @@ func updateTargetConfInMigrationStatus() { } func importData(importFileTasks []*ImportFileTask) { - err := retrieveMigrationUUID() - if err != nil { - utils.ErrExit("failed to get migration UUID: %w", err) - } if (importerRole == TARGET_DB_IMPORTER_ROLE || importerRole == IMPORT_FILE_ROLE) && (tconf.EnableUpsert) { if !utils.AskPrompt(color.RedString("WARNING: Ensure that tables on target YugabyteDB do not have secondary indexes. 
" + @@ -690,11 +692,11 @@ func importData(importFileTasks []*ImportFileTask) { case TARGET_DB_IMPORTER_ROLE: importDataCompletedEvent := createSnapshotImportCompletedEvent() controlPlane.SnapshotImportCompleted(&importDataCompletedEvent) - packAndSendImportDataPayload(COMPLETE) + packAndSendImportDataPayload(COMPLETE, "") case SOURCE_REPLICA_DB_IMPORTER_ROLE: - packAndSendImportDataToSrcReplicaPayload(COMPLETE) + packAndSendImportDataToSrcReplicaPayload(COMPLETE, "") case SOURCE_DB_IMPORTER_ROLE: - packAndSendImportDataToSourcePayload(COMPLETE) + packAndSendImportDataToSourcePayload(COMPLETE, "") } } @@ -752,7 +754,7 @@ func waitForDebeziumStartIfRequired() error { return nil } -func packAndSendImportDataPayload(status string) { +func packAndSendImportDataPayload(status string, errorMsg string) { if !shouldSendCallhome() { return @@ -771,6 +773,7 @@ func packAndSendImportDataPayload(status string) { ParallelJobs: int64(tconf.Parallelism), StartClean: bool(startClean), EnableUpsert: bool(tconf.EnableUpsert), + Error: callhome.SanitizeErrorMsg(errorMsg), } //Getting the imported snapshot details @@ -842,7 +845,7 @@ func getIdentityColumnsForTables(tables []sqlname.NameTuple, identityType string for _, table := range tables { identityColumns, err := tdb.GetIdentityColumnNamesForTable(table, identityType) if err != nil { - utils.ErrExit("error in getting identity(%s) columns for table %s: %w", identityType, table, err) + utils.ErrExit("error in getting identity(%s) columns for table: %s: %w", identityType, table, err) } if len(identityColumns) > 0 { log.Infof("identity(%s) columns for table %s: %v", identityType, table, identityColumns) @@ -864,13 +867,13 @@ func getImportedProgressAmount(task *ImportFileTask, state *ImportDataState) int if reportProgressInBytes { byteCount, err := state.GetImportedByteCount(task.FilePath, task.TableNameTup) if err != nil { - utils.ErrExit("Failed to get imported byte count for table %s: %s", task.TableNameTup, err) + 
utils.ErrExit("Failed to get imported byte count for table: %s: %s", task.TableNameTup, err) } return byteCount } else { rowCount, err := state.GetImportedRowCount(task.FilePath, task.TableNameTup) if err != nil { - utils.ErrExit("Failed to get imported row count for table %s: %s", task.TableNameTup, err) + utils.ErrExit("Failed to get imported row count for table: %s: %s", task.TableNameTup, err) } return rowCount } @@ -924,18 +927,29 @@ func cleanImportState(state *ImportDataState, tasks []*ImportFileTask) { nonEmptyTableNames := lo.Map(nonEmptyNts, func(nt sqlname.NameTuple, _ int) string { return nt.ForOutput() }) - utils.PrintAndLog("Non-Empty tables: [%s]", strings.Join(nonEmptyTableNames, ", ")) - utils.PrintAndLog("The above list of tables on target DB are not empty. ") - yes := utils.AskPrompt("Are you sure you want to start afresh without truncating tables") - if !yes { - utils.ErrExit("Aborting import. Manually truncate the tables on target DB before continuing.") + if truncateTables { + // truncate tables only supported for import-data-to-target. 
+ utils.PrintAndLog("Truncating non-empty tables on DB: %v", nonEmptyTableNames) + err := tdb.TruncateTables(nonEmptyNts) + if err != nil { + utils.ErrExit("failed to truncate tables: %s", err) + } + } else { + utils.PrintAndLog("Non-Empty tables: [%s]", strings.Join(nonEmptyTableNames, ", ")) + utils.PrintAndLog("The above list of tables on DB are not empty.") + utils.PrintAndLog("If you wish to truncate them, re-run the import command with --truncate-tables true") + yes := utils.AskPrompt("Do you want to start afresh without truncating tables") + if !yes { + utils.ErrExit("Aborting import.") + } } + } for _, task := range tasks { err := state.Clean(task.FilePath, task.TableNameTup) if err != nil { - utils.ErrExit("failed to clean import data state for table %q: %s", task.TableNameTup, err) + utils.ErrExit("failed to clean import data state for table: %q: %s", task.TableNameTup, err) } } @@ -943,7 +957,7 @@ func cleanImportState(state *ImportDataState, tasks []*ImportFileTask) { if utils.FileOrFolderExists(sqlldrDir) { err := os.RemoveAll(sqlldrDir) if err != nil { - utils.ErrExit("failed to remove sqlldr directory %q: %s", sqlldrDir, err) + utils.ErrExit("failed to remove sqlldr directory: %q: %s", sqlldrDir, err) } } @@ -998,7 +1012,7 @@ func importFile(state *ImportDataState, task *ImportFileTask, updateProgressFn f log.Infof("Collect all interrupted/remaining splits.") pendingBatches, lastBatchNumber, lastOffset, fileFullySplit, err := state.Recover(task.FilePath, task.TableNameTup) if err != nil { - utils.ErrExit("recovering state for table %q: %s", task.TableNameTup, err) + utils.ErrExit("recovering state for table: %q: %s", task.TableNameTup, err) } for _, batch := range pendingBatches { submitBatch(batch, updateProgressFn, importBatchArgsProto) @@ -1016,12 +1030,12 @@ func splitFilesForTable(state *ImportDataState, filePath string, t sqlname.NameT reader, err := dataStore.Open(filePath) if err != nil { - utils.ErrExit("preparing reader for split generation 
on file %q: %v", filePath, err) + utils.ErrExit("preparing reader for split generation on file: %q: %v", filePath, err) } dataFile, err := datafile.NewDataFile(filePath, reader, dataFileDescriptor) if err != nil { - utils.ErrExit("open datafile %q: %v", filePath, err) + utils.ErrExit("open datafile: %q: %v", filePath, err) } defer dataFile.Close() @@ -1045,13 +1059,13 @@ func splitFilesForTable(state *ImportDataState, filePath string, t sqlname.NameT batchWriter = state.NewBatchWriter(filePath, t, batchNum) err := batchWriter.Init() if err != nil { - utils.ErrExit("initializing batch writer for table %q: %s", t, err) + utils.ErrExit("initializing batch writer for table: %q: %s", t, err) } // Write the header if necessary if header != "" && dataFileDescriptor.FileFormat == datafile.CSV { err = batchWriter.WriteHeader(header) if err != nil { - utils.ErrExit("writing header for table %q: %s", t, err) + utils.ErrExit("writing header for table: %q: %s", t, err) } } } @@ -1089,14 +1103,14 @@ func splitFilesForTable(state *ImportDataState, filePath string, t sqlname.NameT if tconf.TargetDBType == YUGABYTEDB { ybSpecificMsg = ", but should be strictly lower than the the rpc_max_message_size on YugabyteDB (default 267386880 bytes)" } - utils.ErrExit("record num=%d for table %q in file %s is larger than the max batch size %d bytes Max Batch size can be changed using env var MAX_BATCH_SIZE_BYTES%s", numLinesTaken, t.ForOutput(), filePath, tdb.MaxBatchSizeInBytes(), ybSpecificMsg) + utils.ErrExit("record of size %d larger than max batch size: record num=%d for table %q in file %s is larger than the max batch size %d bytes. 
Max Batch size can be changed using env var MAX_BATCH_SIZE_BYTES%s", currentBytesRead, numLinesTaken, t.ForOutput(), filePath, tdb.MaxBatchSizeInBytes(), ybSpecificMsg) } if line != "" { // can't use importBatchArgsProto.Columns as to use case insenstiive column names columnNames, _ := TableToColumnNames.Get(t) line, err = valueConverter.ConvertRow(t, columnNames, line) if err != nil { - utils.ErrExit("transforming line number=%d for table %q in file %s: %s", numLinesTaken, t.ForOutput(), filePath, err) + utils.ErrExit("transforming line number=%d for table: %q in file %s: %s", numLinesTaken, t.ForOutput(), filePath, err) } // Check if adding this record exceeds the max batch size @@ -1125,7 +1139,7 @@ func splitFilesForTable(state *ImportDataState, filePath string, t sqlname.NameT finalizeBatch(true, numLinesTaken, dataFile.GetBytesRead()) dataFile.ResetBytesRead(0) } else if readLineErr != nil { - utils.ErrExit("read line from data file %q: %s", filePath, readLineErr) + utils.ErrExit("read line from data file: %q: %s", filePath, readLineErr) } } @@ -1162,7 +1176,7 @@ func submitBatch(batch *Batch, updateProgressFn func(int64), importBatchArgsProt func importBatch(batch *Batch, importBatchArgsProto *tgtdb.ImportBatchArgs) { err := batch.MarkPending() if err != nil { - utils.ErrExit("marking batch %d as pending: %s", batch.Number, err) + utils.ErrExit("marking batch as pending: %d: %s", batch.Number, err) } log.Infof("Importing %q", batch.FilePath) @@ -1189,70 +1203,11 @@ func importBatch(batch *Batch, importBatchArgsProto *tgtdb.ImportBatchArgs) { } log.Infof("%q => %d rows affected", batch.FilePath, rowsAffected) if err != nil { - utils.ErrExit("import %q into %s: %s", batch.FilePath, batch.TableNameTup, err) + utils.ErrExit("import batch: %q into %s: %s", batch.FilePath, batch.TableNameTup, err) } err = batch.MarkDone() if err != nil { - utils.ErrExit("marking batch %q as done: %s", batch.FilePath, err) - } -} - -func newTargetConn() *pgx.Conn { - conn, err := 
pgx.Connect(context.Background(), tconf.GetConnectionUri()) - if err != nil { - utils.WaitChannel <- 1 - <-utils.WaitChannel - utils.ErrExit("connect to target db: %s", err) - } - - setTargetSchema(conn) - - if sourceDBType == ORACLE && enableOrafce { - setOrafceSearchPath(conn) - } - - return conn -} - -// TODO: Eventually get rid of this function in favour of TargetYugabyteDB.setTargetSchema(). -func setTargetSchema(conn *pgx.Conn) { - if sourceDBType == POSTGRESQL || tconf.Schema == YUGABYTEDB_DEFAULT_SCHEMA { - // For PG, schema name is already included in the object name. - // No need to set schema if importing in the default schema. - return - } - checkSchemaExistsQuery := fmt.Sprintf("SELECT count(schema_name) FROM information_schema.schemata WHERE schema_name = '%s'", tconf.Schema) - var cntSchemaName int - - if err := conn.QueryRow(context.Background(), checkSchemaExistsQuery).Scan(&cntSchemaName); err != nil { - utils.ErrExit("run query %q on target %q to check schema exists: %s", checkSchemaExistsQuery, tconf.Host, err) - } else if cntSchemaName == 0 { - utils.ErrExit("schema '%s' does not exist in target", tconf.Schema) - } - - setSchemaQuery := fmt.Sprintf("SET SCHEMA '%s'", tconf.Schema) - _, err := conn.Exec(context.Background(), setSchemaQuery) - if err != nil { - utils.ErrExit("run query %q on target %q: %s", setSchemaQuery, tconf.Host, err) - } -} - -func dropIdx(conn *pgx.Conn, idxName string) error { - dropIdxQuery := fmt.Sprintf("DROP INDEX IF EXISTS %s", idxName) - log.Infof("Dropping index: %q", dropIdxQuery) - _, err := conn.Exec(context.Background(), dropIdxQuery) - if err != nil { - return fmt.Errorf("failed to drop index %q: %w", idxName, err) - } - return nil -} - -func setOrafceSearchPath(conn *pgx.Conn) { - // append oracle schema in the search_path for orafce - updateSearchPath := `SELECT set_config('search_path', current_setting('search_path') || ', oracle', false)` - _, err := conn.Exec(context.Background(), updateSearchPath) - if 
err != nil { - utils.ErrExit("unable to update search_path for orafce extension: %v", err) + utils.ErrExit("marking batch as done: %q: %s", batch.FilePath, err) } } @@ -1273,63 +1228,6 @@ func getIndexName(sqlQuery string, indexName string) (string, error) { return "", fmt.Errorf("could not find `ON` keyword in the CREATE INDEX statement") } -// TODO: need automation tests for this, covering cases like schema(public vs non-public) or case sensitive names -func beforeIndexCreation(sqlInfo sqlInfo, conn **pgx.Conn, objType string) error { - if !strings.Contains(strings.ToUpper(sqlInfo.stmt), "CREATE INDEX") { - return nil - } - - fullyQualifiedObjName, err := getIndexName(sqlInfo.stmt, sqlInfo.objName) - if err != nil { - return fmt.Errorf("extract qualified index name from DDL [%v]: %w", sqlInfo.stmt, err) - } - if invalidTargetIndexesCache == nil { - invalidTargetIndexesCache, err = getInvalidIndexes(conn) - if err != nil { - return fmt.Errorf("failed to fetch invalid indexes: %w", err) - } - } - - // check index valid or not - if invalidTargetIndexesCache[fullyQualifiedObjName] { - log.Infof("index %q already exists but in invalid state, dropping it", fullyQualifiedObjName) - err = dropIdx(*conn, fullyQualifiedObjName) - if err != nil { - return fmt.Errorf("drop invalid index %q: %w", fullyQualifiedObjName, err) - } - } - - // print the index name as index creation takes time and user can see the progress - color.Yellow("creating index %s ...", fullyQualifiedObjName) - return nil -} - -func getInvalidIndexes(conn **pgx.Conn) (map[string]bool, error) { - var result = make(map[string]bool) - // NOTE: this shouldn't fetch any predefined indexes of pg_catalog schema (assuming they can't be invalid) or indexes of other successful migrations - query := "SELECT indexrelid::regclass FROM pg_index WHERE indisvalid = false" - - rows, err := (*conn).Query(context.Background(), query) - if err != nil { - return nil, fmt.Errorf("querying invalid indexes: %w", err) - } - defer 
rows.Close() - - for rows.Next() { - var fullyQualifiedIndexName string - err := rows.Scan(&fullyQualifiedIndexName) - if err != nil { - return nil, fmt.Errorf("scanning row for invalid index name: %w", err) - } - // if schema is not provided by catalog table, then it is public schema - if !strings.Contains(fullyQualifiedIndexName, ".") { - fullyQualifiedIndexName = fmt.Sprintf("public.%s", fullyQualifiedIndexName) - } - result[fullyQualifiedIndexName] = true - } - return result, nil -} - // TODO: This function is a duplicate of the one in tgtdb/yb.go. Consolidate the two. func getTargetSchemaName(tableName string) string { parts := strings.Split(tableName, ".") @@ -1339,7 +1237,7 @@ func getTargetSchemaName(tableName string) string { if tconf.TargetDBType == POSTGRESQL { defaultSchema, noDefaultSchema := GetDefaultPGSchema(tconf.Schema, ",") if noDefaultSchema { - utils.ErrExit("no default schema for table %q ", tableName) + utils.ErrExit("no default schema for table: %q ", tableName) } return defaultSchema } @@ -1356,11 +1254,11 @@ func prepareTableToColumns(tasks []*ImportFileTask) { // File is either exported from debezium OR this is `import data file` case. 
reader, err := dataStore.Open(task.FilePath) if err != nil { - utils.ErrExit("datastore.Open %q: %v", task.FilePath, err) + utils.ErrExit("datastore.Open: %q: %v", task.FilePath, err) } df, err := datafile.NewDataFile(task.FilePath, reader, dataFileDescriptor) if err != nil { - utils.ErrExit("opening datafile %q: %v", task.FilePath, err) + utils.ErrExit("opening datafile: %q: %v", task.FilePath, err) } header := df.GetHeader() columns = strings.Split(header, dataFileDescriptor.Delimiter) @@ -1381,7 +1279,7 @@ func getDfdTableNameToExportedColumns(dataFileDescriptor *datafile.Descriptor) * for tableNameRaw, columnList := range dataFileDescriptor.TableNameToExportedColumns { nt, err := namereg.NameReg.LookupTableName(tableNameRaw) if err != nil { - utils.ErrExit("lookup table [%s] in name registry: %v", tableNameRaw, err) + utils.ErrExit("lookup table in name registry: %q: %v", tableNameRaw, err) } result.Put(nt, columnList) } @@ -1422,8 +1320,8 @@ func init() { registerTargetDBConnFlags(importDataToTargetCmd) registerImportDataCommonFlags(importDataCmd) registerImportDataCommonFlags(importDataToTargetCmd) - registerImportDataFlags(importDataCmd) - registerImportDataFlags(importDataToTargetCmd) + registerImportDataToTargetFlags(importDataCmd) + registerImportDataToTargetFlags(importDataToTargetCmd) } func createSnapshotImportStartedEvent() cp.SnapshotImportStartedEvent { diff --git a/yb-voyager/cmd/importDataFileCommand.go b/yb-voyager/cmd/importDataFileCommand.go index 5db7edf2aa..1e6aa10863 100644 --- a/yb-voyager/cmd/importDataFileCommand.go +++ b/yb-voyager/cmd/importDataFileCommand.go @@ -72,10 +72,13 @@ var importDataFileCmd = &cobra.Command{ sourceDBType = POSTGRESQL // dummy value - this command is not affected by it sqlname.SourceDBType = sourceDBType CreateMigrationProjectIfNotExists(sourceDBType, exportDir) - + err := retrieveMigrationUUID() + if err != nil { + utils.ErrExit("failed to get migration UUID: %w", err) + } tconf.Schema = 
strings.ToLower(tconf.Schema) tdb = tgtdb.NewTargetDB(&tconf) - err := tdb.Init() + err = tdb.Init() if err != nil { utils.ErrExit("Failed to initialize the target DB: %s", err) } @@ -92,7 +95,7 @@ var importDataFileCmd = &cobra.Command{ importFileTasks := prepareImportFileTasks() prepareForImportDataCmd(importFileTasks) importData(importFileTasks) - packAndSendImportDataFilePayload(COMPLETE) + packAndSendImportDataFilePayload(COMPLETE, "") }, PostRun: func(cmd *cobra.Command, args []string) { @@ -181,7 +184,7 @@ func prepareImportFileTasks() []*ImportFileTask { for _, filePath := range filePaths { fileSize, err := dataStore.FileSize(filePath) if err != nil { - utils.ErrExit("calculating file size of %q in bytes: %v", filePath, err) + utils.ErrExit("calculating file size in bytes: %q: %v", filePath, err) } task := &ImportFileTask{ ID: i, @@ -243,12 +246,12 @@ func checkDataDirFlag() { } dataDirAbs, err := filepath.Abs(dataDir) if err != nil { - utils.ErrExit("unable to resolve absolute path for data-dir(%q): %v", dataDir, err) + utils.ErrExit("unable to resolve absolute path for data-dir: (%q): %v", dataDir, err) } exportDirAbs, err := filepath.Abs(exportDir) if err != nil { - utils.ErrExit("unable to resolve absolute path for export-dir(%q): %v", exportDir, err) + utils.ErrExit("unable to resolve absolute path for export-dir: (%q): %v", exportDir, err) } if strings.HasPrefix(dataDirAbs, exportDirAbs) { @@ -326,7 +329,7 @@ func checkAndParseEscapeAndQuoteChar() { } -func packAndSendImportDataFilePayload(status string) { +func packAndSendImportDataFilePayload(status string, errorMsg string) { if !shouldSendCallhome() { return } @@ -346,6 +349,7 @@ func packAndSendImportDataFilePayload(status string) { ParallelJobs: int64(tconf.Parallelism), StartClean: bool(startClean), DataFileParameters: callhome.MarshalledJsonString(dataFileParameters), + Error: callhome.SanitizeErrorMsg(errorMsg), } switch true { case strings.Contains(dataDir, "s3://"): @@ -489,6 +493,8 @@ If 
any table on YugabyteDB database is non-empty, it prompts whether you want to If you go ahead without truncating, then yb-voyager starts ingesting the data present in the data files with upsert mode. Note that for the cases where a table doesn't have a primary key, this may lead to insertion of duplicate data. To avoid this, exclude the table from --file-table-map or truncate those tables manually before using the start-clean flag (default false)`) + BoolVar(importDataFileCmd.Flags(), &truncateTables, "truncate-tables", false, "Truncate tables on target YugabyteDB before importing data. Only applicable along with --start-clean true (default false)") + importDataFileCmd.Flags().MarkHidden("table-list") importDataFileCmd.Flags().MarkHidden("exclude-table-list") importDataFileCmd.Flags().MarkHidden("table-list-file-path") diff --git a/yb-voyager/cmd/importDataStatusCommand.go b/yb-voyager/cmd/importDataStatusCommand.go index aabf04ba37..62e4ddf72d 100644 --- a/yb-voyager/cmd/importDataStatusCommand.go +++ b/yb-voyager/cmd/importDataStatusCommand.go @@ -102,7 +102,7 @@ func runImportDataStatusCmd() error { reportFile := jsonfile.NewJsonFile[[]*tableMigStatusOutputRow](reportFilePath) err := reportFile.Create(&rows) if err != nil { - utils.ErrExit("creating into json file %s: %v", reportFilePath, err) + utils.ErrExit("creating into json file: %s: %v", reportFilePath, err) } fmt.Print(color.GreenString("Import data status report is written to %s\n", reportFilePath)) return nil diff --git a/yb-voyager/cmd/importDataToSource.go b/yb-voyager/cmd/importDataToSource.go index a14e5b458d..fc27566953 100644 --- a/yb-voyager/cmd/importDataToSource.go +++ b/yb-voyager/cmd/importDataToSource.go @@ -87,7 +87,7 @@ func initTargetConfFromSourceConf() error { return nil } -func packAndSendImportDataToSourcePayload(status string) { +func packAndSendImportDataToSourcePayload(status string, errorMsg string) { if !shouldSendCallhome() { return @@ -107,6 +107,7 @@ func 
packAndSendImportDataToSourcePayload(status string) { ParallelJobs: int64(tconf.Parallelism), StartClean: bool(startClean), LiveWorkflowType: FALL_BACK, + Error: callhome.SanitizeErrorMsg(errorMsg), } importDataPayload.Phase = importPhase diff --git a/yb-voyager/cmd/importDataToSourceReplica.go b/yb-voyager/cmd/importDataToSourceReplica.go index 5382fa2997..f8e93ac236 100644 --- a/yb-voyager/cmd/importDataToSourceReplica.go +++ b/yb-voyager/cmd/importDataToSourceReplica.go @@ -67,17 +67,20 @@ func init() { registerCommonImportFlags(importDataToSourceReplicaCmd) registerSourceReplicaDBAsTargetConnFlags(importDataToSourceReplicaCmd) registerFlagsForSourceReplica(importDataToSourceReplicaCmd) - registerStartCleanFlag(importDataToSourceReplicaCmd) + registerStartCleanFlags(importDataToSourceReplicaCmd) registerImportDataCommonFlags(importDataToSourceReplicaCmd) hideImportFlagsInFallForwardOrBackCmds(importDataToSourceReplicaCmd) } -func registerStartCleanFlag(cmd *cobra.Command) { +func registerStartCleanFlags(cmd *cobra.Command) { BoolVar(cmd.Flags(), &startClean, "start-clean", false, `Starts a fresh import with exported data files present in the export-dir/data directory. If any table on source-replica database is non-empty, it prompts whether you want to continue the import without truncating those tables; If you go ahead without truncating, then yb-voyager starts ingesting the data present in the data files without upsert mode. Note that for the cases where a table doesn't have a primary key, this may lead to insertion of duplicate data. To avoid this, exclude the table using the --exclude-file-list or truncate those tables manually before using the start-clean flag (default false)`) + + BoolVar(cmd.Flags(), &truncateTables, "truncate-tables", false, "Truncate tables on source replica DB before importing data. 
Only applicable along with --start-clean true (default false)") + } func updateFallForwardEnabledInMetaDB() { @@ -89,7 +92,7 @@ func updateFallForwardEnabledInMetaDB() { } } -func packAndSendImportDataToSrcReplicaPayload(status string) { +func packAndSendImportDataToSrcReplicaPayload(status string, errorMsg string) { if !shouldSendCallhome() { return } @@ -108,6 +111,7 @@ func packAndSendImportDataToSrcReplicaPayload(status string) { ParallelJobs: int64(tconf.Parallelism), StartClean: bool(startClean), LiveWorkflowType: FALL_FORWARD, + Error: callhome.SanitizeErrorMsg(errorMsg), } importRowsMap, err := getImportedSnapshotRowsMap("source-replica") if err != nil { diff --git a/yb-voyager/cmd/importSchema.go b/yb-voyager/cmd/importSchema.go index 69fdc9be46..c26d3b89f4 100644 --- a/yb-voyager/cmd/importSchema.go +++ b/yb-voyager/cmd/importSchema.go @@ -47,8 +47,12 @@ var importSchemaCmd = &cobra.Command{ if tconf.TargetDBType == "" { tconf.TargetDBType = YUGABYTEDB } + err := retrieveMigrationUUID() + if err != nil { + utils.ErrExit("failed to get migration UUID: %w", err) + } sourceDBType = GetSourceDBTypeFromMSR() - err := validateImportFlags(cmd, TARGET_DB_IMPORTER_ROLE) + err = validateImportFlags(cmd, TARGET_DB_IMPORTER_ROLE) if err != nil { utils.ErrExit("Error: %s", err.Error()) } @@ -79,10 +83,6 @@ var flagRefreshMViews utils.BoolStr var invalidTargetIndexesCache map[string]bool func importSchema() error { - err := retrieveMigrationUUID() - if err != nil { - return fmt.Errorf("failed to get migration UUID: %w", err) - } tconf.Schema = strings.ToLower(tconf.Schema) @@ -93,7 +93,7 @@ func importSchema() error { // available always and this is just for initialisation of tdb and marking it nil again back. 
tconf.Schema = "public" tdb = tgtdb.NewTargetDB(&tconf) - err = tdb.Init() + err := tdb.Init() if err != nil { utils.ErrExit("Failed to initialize the target DB: %s", err) } @@ -225,8 +225,14 @@ func importSchema() error { dumpStatements(finalFailedSqlStmts, filepath.Join(exportDir, "schema", "failed.sql")) } - if flagPostSnapshotImport && flagRefreshMViews { - refreshMViews(conn) + if flagPostSnapshotImport { + err = importSchemaInternal(exportDir, []string{"TABLE"}, nil) + if err != nil { + return err + } + if flagRefreshMViews { + refreshMViews(conn) + } } else { utils.PrintAndLog("\nNOTE: Materialized Views are not populated by default. To populate them, pass --refresh-mviews while executing `import schema --post-snapshot-import`.") } @@ -273,12 +279,9 @@ func packAndSendImportSchemaPayload(status string, errMsg string) { parts := strings.Split(stmt, "*/\n") errorsList = append(errorsList, strings.Trim(parts[0], "/*\n")) //trimming the prefix of `/*\n` from parts[0] (the error msg) } - if status == ERROR { - errorsList = append(errorsList, errMsg) - } else { - if len(errorsList) > 0 && status != EXIT { - payload.Status = COMPLETE_WITH_ERRORS - } + + if len(errorsList) > 0 && status != EXIT { + payload.Status = COMPLETE_WITH_ERRORS } //import-schema specific payload details @@ -290,6 +293,7 @@ func packAndSendImportSchemaPayload(status string, errMsg string) { ErrorCount: len(errorsList), PostSnapshotImport: bool(flagPostSnapshotImport), StartClean: bool(startClean), + Error: callhome.SanitizeErrorMsg(errMsg), } payload.PhasePayload = callhome.MarshalledJsonString(importSchemaPayload) err := callhome.SendPayload(&payload) @@ -303,7 +307,7 @@ func isYBDatabaseIsColocated(conn *pgx.Conn) bool { query := "SELECT yb_is_database_colocated();" err := conn.QueryRow(context.Background(), query).Scan(&isColocated) if err != nil { - utils.ErrExit("failed to check if Target DB '%s' is colocated or not: %v", tconf.DBName, err) + utils.ErrExit("failed to check if Target DB 
is colocated or not: %q: %v", tconf.DBName, err) } log.Infof("target DB '%s' colocoated='%t'", tconf.DBName, isColocated) return isColocated @@ -337,7 +341,7 @@ func dumpStatements(stmts []string, filePath string) { for i := 0; i < len(stmts); i++ { _, err = file.WriteString(stmts[i] + "\n\n") if err != nil { - utils.ErrExit("failed writing in file %s: %v", filePath, err) + utils.ErrExit("failed writing in file: %s: %v", filePath, err) } } @@ -373,7 +377,7 @@ func refreshMViews(conn *pgx.Conn) { query := fmt.Sprintf("REFRESH MATERIALIZED VIEW %s", mViewName) _, err := conn.Exec(context.Background(), query) if err != nil && !strings.Contains(strings.ToLower(err.Error()), "has not been populated") { - utils.ErrExit("error in refreshing the materialized view %s: %v", mViewName, err) + utils.ErrExit("error in refreshing the materialized view: %s: %v", mViewName, err) } } log.Infof("Checking if mviews are refreshed or not - %v", mViewNames) @@ -382,7 +386,7 @@ func refreshMViews(conn *pgx.Conn) { query := fmt.Sprintf("SELECT * from %s LIMIT 1;", mViewName) rows, err := conn.Query(context.Background(), query) if err != nil { - utils.ErrExit("error in checking whether mview %s is refreshed or not: %v", mViewName, err) + utils.ErrExit("error in checking whether mview is refreshed or not: %q: %v", mViewName, err) } if !rows.Next() { mviewsNotRefreshed = append(mviewsNotRefreshed, mViewName) @@ -411,7 +415,7 @@ func createTargetSchemas(conn *pgx.Conn) { schemaAnalysisReport := analyzeSchemaInternal( &srcdb.Source{ DBType: sourceDBType, - }) + }, false) switch sourceDBType { case "postgresql": // in case of postgreSQL as source, there can be multiple schemas present in a database @@ -444,7 +448,7 @@ func createTargetSchemas(conn *pgx.Conn) { utils.PrintAndLog("dropping schema '%s' in target database", targetSchema) _, err := conn.Exec(context.Background(), dropSchemaQuery) if err != nil { - utils.ErrExit("Failed to drop schema %q: %s", targetSchema, err) + 
utils.ErrExit("Failed to drop schema: %q: %s", targetSchema, err) } } else { utils.PrintAndLog("schema '%s' already present in target database, continuing with it..\n", targetSchema) @@ -461,7 +465,7 @@ func createTargetSchemas(conn *pgx.Conn) { utils.PrintAndLog("creating schema '%s' in target database...", tconf.Schema) _, err := conn.Exec(context.Background(), createSchemaQuery) if err != nil { - utils.ErrExit("Failed to create %q schema in the target DB: %s", tconf.Schema, err) + utils.ErrExit("Failed to create schema in the target DB: %q: %s", tconf.Schema, err) } } @@ -473,7 +477,7 @@ func createTargetSchemas(conn *pgx.Conn) { } func checkIfTargetSchemaExists(conn *pgx.Conn, targetSchema string) bool { - checkSchemaExistQuery := fmt.Sprintf("SELECT schema_name FROM information_schema.schemata WHERE schema_name = '%s'", targetSchema) + checkSchemaExistQuery := fmt.Sprintf("select nspname from pg_namespace n where n.nspname = '%s'", targetSchema) var fetchedSchema string err := conn.QueryRow(context.Background(), checkSchemaExistQuery).Scan(&fetchedSchema) @@ -481,7 +485,7 @@ func checkIfTargetSchemaExists(conn *pgx.Conn, targetSchema string) bool { if err != nil && (strings.Contains(err.Error(), "no rows in result set") && fetchedSchema == "") { return false } else if err != nil { - utils.ErrExit("Failed to check if schema %q exists: %s", targetSchema, err) + utils.ErrExit("Failed to check if schema exists: %q: %s", targetSchema, err) } return fetchedSchema == targetSchema diff --git a/yb-voyager/cmd/importSchemaYugabyteDB.go b/yb-voyager/cmd/importSchemaYugabyteDB.go index ec311172d7..27fae75f2e 100644 --- a/yb-voyager/cmd/importSchemaYugabyteDB.go +++ b/yb-voyager/cmd/importSchemaYugabyteDB.go @@ -23,16 +23,23 @@ import ( "time" "github.com/fatih/color" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/samber/lo" log "github.com/sirupsen/logrus" "golang.org/x/exp/slices" + 
"github.com/yugabyte/yb-voyager/yb-voyager/src/query/queryparser" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" ) var deferredSqlStmts []sqlInfo var finalFailedSqlStmts []string +// The client message (NOTICE/WARNING) from psql is stored in this global variable. +// as part of the noticeHandler function for every query executed. +var notice *pgconn.Notice + func importSchemaInternal(exportDir string, importObjectList []string, skipFn func(string, string) bool) error { schemaDir := filepath.Join(exportDir, "schema") @@ -49,6 +56,25 @@ func importSchemaInternal(exportDir string, importObjectList []string, return nil } +func isNotValidConstraint(stmt string) (bool, error) { + parseTree, err := queryparser.Parse(stmt) + if err != nil { + return false, fmt.Errorf("error parsing the ddl[%s]: %v", stmt, err) + } + ddlObj, err := queryparser.ProcessDDL(parseTree) + if err != nil { + return false, fmt.Errorf("error in process DDL[%s]:%v", stmt, err) + } + alter, ok := ddlObj.(*queryparser.AlterTable) + if !ok { + return false, nil + } + if alter.IsAddConstraintType() && alter.ConstraintNotValid { + return true, nil + } + return false, nil +} + func executeSqlFile(file string, objType string, skipFn func(string, string) bool) error { log.Infof("Execute SQL file %q on target %q", file, tconf.Host) conn := newTargetConn() @@ -60,7 +86,6 @@ func executeSqlFile(file string, objType string, skipFn func(string, string) boo }() sqlInfoArr := parseSqlFileForObjectType(file, objType) - var err error for _, sqlInfo := range sqlInfoArr { if conn == nil { conn = newTargetConn() @@ -71,15 +96,14 @@ func executeSqlFile(file string, objType string, skipFn func(string, string) boo if !setOrSelectStmt && skipFn != nil && skipFn(objType, sqlInfo.stmt) { continue } - - if objType == "TABLE" { - stmt := strings.ToUpper(sqlInfo.stmt) - skip := strings.Contains(stmt, "ALTER TABLE") && strings.Contains(stmt, "REPLICA IDENTITY") - if skip { - //skipping DDLS like ALTER TABLE ... 
REPLICA IDENTITY .. as this is not supported in YB - log.Infof("Skipping DDL: %s", sqlInfo.stmt) - continue - } + // Check if the statement should be skipped + skip, err := shouldSkipDDL(sqlInfo.stmt, objType) + if err != nil { + return fmt.Errorf("error checking whether to skip DDL for statement [%s]: %v", sqlInfo.stmt, err) + } + if skip { + log.Infof("Skipping DDL: %s", sqlInfo.stmt) + continue } err = executeSqlStmtWithRetries(&conn, sqlInfo, objType) @@ -90,8 +114,39 @@ func executeSqlFile(file string, objType string, skipFn func(string, string) boo return nil } +func shouldSkipDDL(stmt string, objType string) (bool, error) { + stmt = strings.ToUpper(stmt) + + // pg_dump generate `SET client_min_messages = 'warning';`, but we want to get + // NOTICE severity as well (which is the default), hence skipping this. + //pg_dump 17 gives this SET transaction_timeout = 0; + if strings.Contains(stmt, CLIENT_MESSAGES_SESSION_VAR) || strings.Contains(stmt, TRANSACTION_TIMEOUT_SESSION_VAR) { + return true, nil + } + if objType != TABLE { + return false, nil + } + + skipReplicaIdentity := strings.Contains(stmt, "ALTER TABLE") && strings.Contains(stmt, "REPLICA IDENTITY") + if skipReplicaIdentity { + return true, nil + } + isNotValid, err := isNotValidConstraint(stmt) + if err != nil { + return false, fmt.Errorf("error checking whether stmt is to add not valid constraint: %v", err) + } + skipNotValidWithoutPostImport := isNotValid && !bool(flagPostSnapshotImport) + skipOtherDDLsWithPostImport := (bool(flagPostSnapshotImport) && !isNotValid) + if skipNotValidWithoutPostImport || // Skipping NOT VALID CONSTRAINT in import schema without post-snapshot-mode + skipOtherDDLsWithPostImport { // Skipping other TABLE DDLs than the NOT VALID in post-snapshot-import mode + return true, nil + } + return false, nil +} + func executeSqlStmtWithRetries(conn **pgx.Conn, sqlInfo sqlInfo, objType string) error { var err error + var stmtNotice *pgconn.Notice log.Infof("On %s run 
query:\n%s\n", tconf.Host, sqlInfo.formattedStmt) for retryCount := 0; retryCount <= DDL_MAX_RETRY_COUNT; retryCount++ { if retryCount > 0 { // Not the first iteration. @@ -108,9 +163,10 @@ func executeSqlStmtWithRetries(conn **pgx.Conn, sqlInfo sqlInfo, objType string) return fmt.Errorf("before index creation: %w", err) } } - _, err = (*conn).Exec(context.Background(), sqlInfo.formattedStmt) + stmtNotice, err = execStmtAndGetNotice(*conn, sqlInfo.formattedStmt) if err == nil { - utils.PrintSqlStmtIfDDL(sqlInfo.stmt, utils.GetObjectFileName(filepath.Join(exportDir, "schema"), objType)) + utils.PrintSqlStmtIfDDL(sqlInfo.stmt, utils.GetObjectFileName(filepath.Join(exportDir, "schema"), objType), + getNoticeMessage(stmtNotice)) return nil } @@ -162,7 +218,8 @@ func executeSqlStmtWithRetries(conn **pgx.Conn, sqlInfo sqlInfo, objType string) if missingRequiredSchemaObject(err) { // Do nothing for deferred case } else { - utils.PrintSqlStmtIfDDL(sqlInfo.stmt, utils.GetObjectFileName(filepath.Join(exportDir, "schema"), objType)) + utils.PrintSqlStmtIfDDL(sqlInfo.stmt, utils.GetObjectFileName(filepath.Join(exportDir, "schema"), objType), + getNoticeMessage(stmtNotice)) color.Red(fmt.Sprintf("%s\n", err.Error())) if tconf.ContinueOnError { log.Infof("appending stmt to failedSqlStmts list: %s\n", utils.GetSqlStmtToPrint(sqlInfo.stmt)) @@ -201,9 +258,14 @@ func importDeferredStatements() { beforeDeferredSqlCount := len(deferredSqlStmts) var failedSqlStmtInIthIteration []string for j := 0; j < len(deferredSqlStmts); j++ { - _, err = conn.Exec(context.Background(), deferredSqlStmts[j].formattedStmt) + var stmtNotice *pgconn.Notice + stmtNotice, err = execStmtAndGetNotice(conn, deferredSqlStmts[j].formattedStmt) if err == nil { utils.PrintAndLog("%s\n", utils.GetSqlStmtToPrint(deferredSqlStmts[j].stmt)) + noticeMsg := getNoticeMessage(stmtNotice) + if noticeMsg != "" { + utils.PrintAndLog(color.YellowString("%s\n", noticeMsg)) + } // removing successfully executed SQL 
deferredSqlStmts = append(deferredSqlStmts[:j], deferredSqlStmts[j+1:]...) break @@ -270,3 +332,165 @@ func applySchemaObjectFilterFlags(importObjectOrderList []string) []string { } return finalImportObjectList } + +func getInvalidIndexes(conn **pgx.Conn) (map[string]bool, error) { + var result = make(map[string]bool) + // NOTE: this shouldn't fetch any predefined indexes of pg_catalog schema (assuming they can't be invalid) or indexes of other successful migrations + query := "SELECT indexrelid::regclass FROM pg_index WHERE indisvalid = false" + + rows, err := (*conn).Query(context.Background(), query) + if err != nil { + return nil, fmt.Errorf("querying invalid indexes: %w", err) + } + defer rows.Close() + + for rows.Next() { + var fullyQualifiedIndexName string + err := rows.Scan(&fullyQualifiedIndexName) + if err != nil { + return nil, fmt.Errorf("scanning row for invalid index name: %w", err) + } + // if schema is not provided by catalog table, then it is public schema + if !strings.Contains(fullyQualifiedIndexName, ".") { + fullyQualifiedIndexName = fmt.Sprintf("public.%s", fullyQualifiedIndexName) + } + result[fullyQualifiedIndexName] = true + } + return result, nil +} + +// TODO: need automation tests for this, covering cases like schema(public vs non-public) or case sensitive names +func beforeIndexCreation(sqlInfo sqlInfo, conn **pgx.Conn, objType string) error { + if !strings.Contains(strings.ToUpper(sqlInfo.stmt), "CREATE INDEX") { + return nil + } + + fullyQualifiedObjName, err := getIndexName(sqlInfo.stmt, sqlInfo.objName) + if err != nil { + return fmt.Errorf("extract qualified index name from DDL [%v]: %w", sqlInfo.stmt, err) + } + if invalidTargetIndexesCache == nil { + invalidTargetIndexesCache, err = getInvalidIndexes(conn) + if err != nil { + return fmt.Errorf("failed to fetch invalid indexes: %w", err) + } + } + + // check index valid or not + if invalidTargetIndexesCache[fullyQualifiedObjName] { + log.Infof("index %q already exists but in 
invalid state, dropping it", fullyQualifiedObjName) + err = dropIdx(*conn, fullyQualifiedObjName) + if err != nil { + return fmt.Errorf("drop invalid index %q: %w", fullyQualifiedObjName, err) + } + } + + // print the index name as index creation takes time and user can see the progress + color.Yellow("creating index %s ...", fullyQualifiedObjName) + return nil +} + +func dropIdx(conn *pgx.Conn, idxName string) error { + dropIdxQuery := fmt.Sprintf("DROP INDEX IF EXISTS %s", idxName) + log.Infof("Dropping index: %q", dropIdxQuery) + _, err := conn.Exec(context.Background(), dropIdxQuery) + if err != nil { + return fmt.Errorf("failed to drop index %q: %w", idxName, err) + } + return nil +} + +func newTargetConn() *pgx.Conn { + // save notice in global variable + noticeHandler := func(conn *pgconn.PgConn, n *pgconn.Notice) { + // ALTER TABLE .. ADD PRIMARY KEY throws the following notice in YugabyteDB. + // unlogged=# ALTER TABLE ONLY public.ul ADD CONSTRAINT ul_pkey PRIMARY KEY (id); + // NOTICE: table rewrite may lead to inconsistencies + // DETAIL: Concurrent DMLs may not be reflected in the new table. + // HINT: See https://github.com/yugabyte/yugabyte-db/issues/19860. Set 'ysql_suppress_unsafe_alter_notice' yb-tserver gflag to true to suppress this notice. + + // We ignore this notice because: + // 1. This is an empty table at the time at which we are importing the schema + // and there is no concurrent DMLs + // 2. 
This would unnecessarily clutter the output with NOTICES for every table, + // and scare the user + noticesToIgnore := []string{ + "table rewrite may lead to inconsistencies", + } + + if n != nil { + if lo.Contains(noticesToIgnore, n.Message) { + notice = nil + return + } + } + notice = n + } + errExit := func(err error) { + if err != nil { + utils.WaitChannel <- 1 + <-utils.WaitChannel + utils.ErrExit("connect to target db: %s", err) + } + } + + conf, err := pgx.ParseConfig(tconf.GetConnectionUri()) + errExit(err) + conf.OnNotice = noticeHandler + + conn, err := pgx.ConnectConfig(context.Background(), conf) + errExit(err) + + setTargetSchema(conn) + + if sourceDBType == ORACLE && enableOrafce { + setOrafceSearchPath(conn) + } + + return conn +} + +func getNoticeMessage(n *pgconn.Notice) string { + if n == nil { + return "" + } + return fmt.Sprintf("%s: %s", n.Severity, n.Message) +} + +// TODO: Eventually get rid of this function in favour of TargetYugabyteDB.setTargetSchema(). +func setTargetSchema(conn *pgx.Conn) { + if sourceDBType == POSTGRESQL || tconf.Schema == YUGABYTEDB_DEFAULT_SCHEMA { + // For PG, schema name is already included in the object name. + // No need to set schema if importing in the default schema. 
+ return + } + checkSchemaExistsQuery := fmt.Sprintf("SELECT count(schema_name) FROM information_schema.schemata WHERE schema_name = '%s'", tconf.Schema) + var cntSchemaName int + + if err := conn.QueryRow(context.Background(), checkSchemaExistsQuery).Scan(&cntSchemaName); err != nil { + utils.ErrExit("run query: %q on target %q to check schema exists: %s", checkSchemaExistsQuery, tconf.Host, err) + } else if cntSchemaName == 0 { + utils.ErrExit("schema does not exist in target: %q", tconf.Schema) + } + + setSchemaQuery := fmt.Sprintf("SET SCHEMA '%s'", tconf.Schema) + _, err := conn.Exec(context.Background(), setSchemaQuery) + if err != nil { + utils.ErrExit("run query: %q on target %q: %s", setSchemaQuery, tconf.Host, err) + } +} + +func setOrafceSearchPath(conn *pgx.Conn) { + // append oracle schema in the search_path for orafce + updateSearchPath := `SELECT set_config('search_path', current_setting('search_path') || ', oracle', false)` + _, err := conn.Exec(context.Background(), updateSearchPath) + if err != nil { + utils.ErrExit("unable to update search_path for orafce extension: %v", err) + } +} + +func execStmtAndGetNotice(conn *pgx.Conn, stmt string) (*pgconn.Notice, error) { + notice = nil // reset notice. + _, err := conn.Exec(context.Background(), stmt) + return notice, err +} diff --git a/yb-voyager/cmd/import_data_test.go b/yb-voyager/cmd/import_data_test.go index e8c3a2300f..59fccddf30 100644 --- a/yb-voyager/cmd/import_data_test.go +++ b/yb-voyager/cmd/import_data_test.go @@ -1,3 +1,20 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ package cmd import ( diff --git a/yb-voyager/cmd/live_migration_test.go b/yb-voyager/cmd/live_migration_test.go index 3ace7907a3..81f9da5466 100644 --- a/yb-voyager/cmd/live_migration_test.go +++ b/yb-voyager/cmd/live_migration_test.go @@ -1,3 +1,5 @@ +//go:build unit + /* Copyright (c) YugabyteDB, Inc. diff --git a/yb-voyager/cmd/logging.go b/yb-voyager/cmd/logging.go index c28b47c28d..21b74ecee2 100644 --- a/yb-voyager/cmd/logging.go +++ b/yb-voyager/cmd/logging.go @@ -81,7 +81,7 @@ func InitLogging(logDir string, logLevel string, disableLogging bool, cmdName st func redactPasswordFromArgs() { for i := 0; i < len(os.Args); i++ { opt := os.Args[i] - if opt == "--source-db-password" || opt == "--target-db-password" || opt == "--ff-db-password" { + if opt == "--source-db-password" || opt == "--target-db-password" || opt == "--source-replica-db-password" { os.Args[i+1] = "XXX" } } diff --git a/yb-voyager/cmd/migration_complexity.go b/yb-voyager/cmd/migration_complexity.go new file mode 100644 index 0000000000..69b71d1d3b --- /dev/null +++ b/yb-voyager/cmd/migration_complexity.go @@ -0,0 +1,343 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package cmd + +import ( + "bytes" + "encoding/csv" + "fmt" + "math" + "os" + "path/filepath" + "strings" + "text/template" + + "github.com/samber/lo" + log "github.com/sirupsen/logrus" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" + "golang.org/x/exp/slices" +) + +const NOT_AVAILABLE = "NOT AVAILABLE" + +var ( + LEVEL_1_MEDIUM_THRESHOLD = 20 + LEVEL_1_HIGH_THRESHOLD = math.MaxInt32 + LEVEL_2_MEDIUM_THRESHOLD = 10 + LEVEL_2_HIGH_THRESHOLD = 100 + LEVEL_3_MEDIUM_THRESHOLD = 0 + LEVEL_3_HIGH_THRESHOLD = 4 + migrationComplexityRationale string +) + +// Migration complexity calculation based on the detected assessment issues +func calculateMigrationComplexity(sourceDBType string, schemaDirectory string, assessmentReport AssessmentReport) string { + if sourceDBType != ORACLE && sourceDBType != POSTGRESQL { + return NOT_AVAILABLE + } + + log.Infof("calculating migration complexity for %s...", sourceDBType) + switch sourceDBType { + case ORACLE: + migrationComplexity, err := calculateMigrationComplexityForOracle(schemaDirectory) + if err != nil { + log.Errorf("failed to get migration complexity for oracle: %v", err) + return NOT_AVAILABLE + } + return migrationComplexity + case POSTGRESQL: + return calculateMigrationComplexityForPG(assessmentReport) + default: + panic(fmt.Sprintf("unsupported source db type '%s' for migration complexity", sourceDBType)) + } +} + +func calculateMigrationComplexityForPG(assessmentReport AssessmentReport) string { + if assessmentReport.MigrationComplexity != "" { + return assessmentReport.MigrationComplexity + } + + counts := lo.CountValuesBy(assessmentReport.Issues, func(issue AssessmentIssue) string { + return issue.Impact + }) + l1IssueCount := counts[constants.IMPACT_LEVEL_1] + l2IssueCount := counts[constants.IMPACT_LEVEL_2] + l3IssueCount := 
counts[constants.IMPACT_LEVEL_3] + + log.Infof("issue counts: level-1=%d, level-2=%d, level-3=%d\n", l1IssueCount, l2IssueCount, l3IssueCount) + + // Determine complexity for each level + comp1 := getComplexityForLevel(constants.IMPACT_LEVEL_1, l1IssueCount) + comp2 := getComplexityForLevel(constants.IMPACT_LEVEL_2, l2IssueCount) + comp3 := getComplexityForLevel(constants.IMPACT_LEVEL_3, l3IssueCount) + complexities := []string{comp1, comp2, comp3} + log.Infof("complexities according to each level: %v", complexities) + + finalComplexity := constants.MIGRATION_COMPLEXITY_LOW + // If ANY level is HIGH => final is HIGH + if slices.Contains(complexities, constants.MIGRATION_COMPLEXITY_HIGH) { + finalComplexity = constants.MIGRATION_COMPLEXITY_HIGH + } else if slices.Contains(complexities, constants.MIGRATION_COMPLEXITY_MEDIUM) { + // Else if ANY level is MEDIUM => final is MEDIUM + finalComplexity = constants.MIGRATION_COMPLEXITY_MEDIUM + } + + migrationComplexityRationale = buildRationale(finalComplexity, l1IssueCount, l2IssueCount, l3IssueCount) + return finalComplexity +} + +// This is a temporary logic to get migration complexity for oracle based on the migration level from ora2pg report. +// Ideally, we should ALSO be considering the schema analysis report to get the migration complexity. 
+func calculateMigrationComplexityForOracle(schemaDirectory string) (string, error) { + ora2pgReportPath := filepath.Join(schemaDirectory, "ora2pg_report.csv") + if !utils.FileOrFolderExists(ora2pgReportPath) { + return "", fmt.Errorf("ora2pg report file not found at %s", ora2pgReportPath) + } + file, err := os.Open(ora2pgReportPath) + if err != nil { + return "", fmt.Errorf("failed to read file %s: %w", ora2pgReportPath, err) + } + defer func() { + if err := file.Close(); err != nil { + log.Errorf("Error while closing file %s: %v", ora2pgReportPath, err) + } + }() + // Sample file contents + + // "dbi:Oracle:(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = xyz)(PORT = 1521))(CONNECT_DATA = (SERVICE_NAME = DMS)))"; + // "Oracle Database 19c Enterprise Edition Release 19.0.0.0.0";"ASSESS_MIGRATION";"261.62 MB";"1 person-day(s)";"A-2"; + // "0/0/0.00";"0/0/0";"0/0/0";"25/0/6.50";"0/0/0.00";"0/0/0";"0/0/0";"0/0/0";"0/0/0";"3/0/1.00";"3/0/1.00"; + // "44/0/4.90";"27/0/2.70";"9/0/1.80";"4/0/16.00";"5/0/3.00";"2/0/2.00";"125/0/58.90" + // + // X/Y/Z - total/invalid/cost for each type of objects(table,function,etc). Last data element is the sum total. + // total cost = 58.90 units (1 unit = 5 minutes). Therefore total cost is approx 1 person-days. + // column 6 is Migration level. 
+ // Migration levels: + // A - Migration that might be run automatically + // B - Migration with code rewrite and a human-days cost up to 5 days + // C - Migration with code rewrite and a human-days cost above 5 days + // Technical levels: + // 1 = trivial: no stored functions and no triggers + // 2 = easy: no stored functions but with triggers, no manual rewriting + // 3 = simple: stored functions and/or triggers, no manual rewriting + // 4 = manual: no stored functions but with triggers or views with code rewriting + // 5 = difficult: stored functions and/or triggers with code rewriting + reader := csv.NewReader(file) + reader.Comma = ';' + rows, err := reader.ReadAll() + if err != nil { + log.Errorf("error reading csv file %s: %v", ora2pgReportPath, err) + return "", fmt.Errorf("error reading csv file %s: %w", ora2pgReportPath, err) + } + if len(rows) > 1 { + return "", fmt.Errorf("invalid ora2pg report file format. Expected 1 row, found %d. contents = %v", len(rows), rows) + } + reportData := rows[0] + migrationLevel := strings.Split(reportData[5], "-")[0] + + switch migrationLevel { + case "A": + return constants.MIGRATION_COMPLEXITY_LOW, nil + case "B": + return constants.MIGRATION_COMPLEXITY_MEDIUM, nil + case "C": + return constants.MIGRATION_COMPLEXITY_HIGH, nil + default: + return "", fmt.Errorf("invalid migration level [%s] found in ora2pg report %v", migrationLevel, reportData) + } +} + +// getComplexityLevel returns LOW, MEDIUM, or HIGH for a given impact level & count +func getComplexityForLevel(level string, count int) string { + switch level { + // ------------------------------------------------------- + // LEVEL_1: + // - LOW if count <= 20 + // - MEDIUM if 20 < count < math.MaxInt32 + // - HIGH if count >= math.MaxInt32 (not possible) + // ------------------------------------------------------- + case constants.IMPACT_LEVEL_1: + if count <= LEVEL_1_MEDIUM_THRESHOLD { + return constants.MIGRATION_COMPLEXITY_LOW + } else if count <= 
LEVEL_1_HIGH_THRESHOLD { + return constants.MIGRATION_COMPLEXITY_MEDIUM + } + return constants.MIGRATION_COMPLEXITY_HIGH + + // ------------------------------------------------------- + // LEVEL_2: + // - LOW if count <= 10 + // - MEDIUM if 10 < count <= 100 + // - HIGH if count > 100 + // ------------------------------------------------------- + case constants.IMPACT_LEVEL_2: + if count <= LEVEL_2_MEDIUM_THRESHOLD { + return constants.MIGRATION_COMPLEXITY_LOW + } else if count <= LEVEL_2_HIGH_THRESHOLD { + return constants.MIGRATION_COMPLEXITY_MEDIUM + } + return constants.MIGRATION_COMPLEXITY_HIGH + + // ------------------------------------------------------- + // LEVEL_3: + // - LOW if count == 0 + // - MEDIUM if 0 < count <= 4 + // - HIGH if count > 4 + // ------------------------------------------------------- + case constants.IMPACT_LEVEL_3: + if count <= LEVEL_3_MEDIUM_THRESHOLD { + return constants.MIGRATION_COMPLEXITY_LOW + } else if count <= LEVEL_3_HIGH_THRESHOLD { + return constants.MIGRATION_COMPLEXITY_MEDIUM + } + return constants.MIGRATION_COMPLEXITY_HIGH + + default: + panic(fmt.Sprintf("unknown impact level %s for determining complexity", level)) + } +} + +// ======================================= Migration Complexity Explanation ========================================== + +// TODO: discuss if the html should be in main report or here +const explainTemplateHTML = ` +{{- if .Summaries }} +

Below is a breakdown of the issues detected in different categories for each impact level.

+ + + + + + + + + + + + {{- range .Summaries }} + + + + + + + + {{- end }} + +
CategoryLevel-1Level-2Level-3Total
{{ .Category }}{{ index .ImpactCounts "LEVEL_1" }}{{ index .ImpactCounts "LEVEL_2" }}{{ index .ImpactCounts "LEVEL_3" }}{{ .TotalIssueCount }}
+{{- end }} + +

+ Complexity: {{ .Complexity }}
+ Reasoning: {{ .ComplexityRationale }} +

+ +

+Impact Levels:
+ Level-1: Resolutions are available with minimal effort.
+ Level-2: Resolutions are available requiring moderate effort.
+ Level-3: Resolutions may not be available or are complex. +

+` + +const explainTemplateText = `Reasoning: {{ .ComplexityRationale }}` + +type MigrationComplexityExplanationData struct { + Summaries []MigrationComplexityCategorySummary + Complexity string + ComplexityRationale string // short reasoning or explanation text +} + +type MigrationComplexityCategorySummary struct { + Category string + TotalIssueCount int + ImpactCounts map[string]int // e.g. {"Level-1": 3, "Level-2": 5, "Level-3": 2} +} + +func buildMigrationComplexityExplanation(sourceDBType string, assessmentReport AssessmentReport, reportFormat string) (string, error) { + if sourceDBType != POSTGRESQL { + return "", nil + } + + var explanation MigrationComplexityExplanationData + explanation.Complexity = assessmentReport.MigrationComplexity + explanation.ComplexityRationale = migrationComplexityRationale + + explanation.Summaries = buildCategorySummary(assessmentReport.Issues) + + var tmpl *template.Template + var err error + if reportFormat == "html" { + tmpl, err = template.New("Explain").Parse(explainTemplateHTML) + } else { + tmpl, err = template.New("Explain").Parse(explainTemplateText) + } + + if err != nil { + return "", fmt.Errorf("failed creating the explanation template: %w", err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, explanation); err != nil { + return "", fmt.Errorf("failed executing the template with data: %w", err) + } + return buf.String(), nil +} + +func buildRationale(finalComplexity string, l1Count int, l2Count int, l3Count int) string { + switch finalComplexity { + case constants.MIGRATION_COMPLEXITY_HIGH: + return fmt.Sprintf("Found %d Level-2 issue(s) and %d Level-3 issue(s), resulting in HIGH migration complexity", l2Count, l3Count) + case constants.MIGRATION_COMPLEXITY_MEDIUM: + return fmt.Sprintf("Found %d Level-1 issue(s), %d Level-2 issue(s) and %d Level-3 issue(s), resulting in MEDIUM migration complexity", l1Count, l2Count, l3Count) + case constants.MIGRATION_COMPLEXITY_LOW: + return fmt.Sprintf("Found %d 
Level-1 issue(s) and %d Level-2 issue(s), resulting in LOW migration complexity", l1Count, l2Count) + } + return "" +} + +func buildCategorySummary(issues []AssessmentIssue) []MigrationComplexityCategorySummary { + if len(issues) == 0 { + return nil + + } + + summaryMap := make(map[string]*MigrationComplexityCategorySummary) + for _, issue := range issues { + if issue.Category == "" { + continue // skipping unknown category issues + } + + if _, ok := summaryMap[issue.Category]; !ok { + summaryMap[issue.Category] = &MigrationComplexityCategorySummary{ + Category: issue.Category, + TotalIssueCount: 0, + ImpactCounts: make(map[string]int), + } + } + + summaryMap[issue.Category].TotalIssueCount++ + summaryMap[issue.Category].ImpactCounts[issue.Impact]++ + } + + var result []MigrationComplexityCategorySummary + for _, summary := range summaryMap { + summary.Category = utils.SnakeCaseToTitleCase(summary.Category) + result = append(result, *summary) + } + return result +} diff --git a/yb-voyager/cmd/migration_complexity_test.go b/yb-voyager/cmd/migration_complexity_test.go new file mode 100644 index 0000000000..7916c5bc04 --- /dev/null +++ b/yb-voyager/cmd/migration_complexity_test.go @@ -0,0 +1,134 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package cmd + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" +) + +func TestGetComplexityForLevel(t *testing.T) { + testCases := []struct { + level string + count int + expected string + desc string + }{ + // ------------------------------- + // LEVEL_1 test cases + // ------------------------------- + { + level: constants.IMPACT_LEVEL_1, + count: 0, + expected: constants.MIGRATION_COMPLEXITY_LOW, + desc: "L1, count=0 => LOW", + }, + { + level: constants.IMPACT_LEVEL_1, + count: 20, + expected: constants.MIGRATION_COMPLEXITY_LOW, + desc: "L1, count=20 => LOW", + }, + { + level: constants.IMPACT_LEVEL_1, + count: 21, + expected: constants.MIGRATION_COMPLEXITY_MEDIUM, + desc: "L1, count=21 => MEDIUM", + }, + { + level: constants.IMPACT_LEVEL_1, + count: 999999999, + expected: constants.MIGRATION_COMPLEXITY_MEDIUM, + desc: "L1, big count => MEDIUM", + }, + + // ------------------------------- + // LEVEL_2 test cases + // ------------------------------- + { + level: constants.IMPACT_LEVEL_2, + count: 0, + expected: constants.MIGRATION_COMPLEXITY_LOW, + desc: "L2, count=0 => LOW", + }, + { + level: constants.IMPACT_LEVEL_2, + count: 10, + expected: constants.MIGRATION_COMPLEXITY_LOW, + desc: "L2, count=10 => LOW", + }, + { + level: constants.IMPACT_LEVEL_2, + count: 11, + expected: constants.MIGRATION_COMPLEXITY_MEDIUM, + desc: "L2, count=11 => MEDIUM", + }, + { + level: constants.IMPACT_LEVEL_2, + count: 100, + expected: constants.MIGRATION_COMPLEXITY_MEDIUM, + desc: "L2, count=100 => MEDIUM", + }, + { + level: constants.IMPACT_LEVEL_2, + count: 101, + expected: constants.MIGRATION_COMPLEXITY_HIGH, + desc: "L2, count=101 => HIGH", + }, + + // ------------------------------- + // LEVEL_3 test cases + // ------------------------------- + { + level: constants.IMPACT_LEVEL_3, + count: 0, + expected: constants.MIGRATION_COMPLEXITY_LOW, + desc: "L3, count=0 => LOW", + }, + { + level: 
constants.IMPACT_LEVEL_3, + count: 1, + expected: constants.MIGRATION_COMPLEXITY_MEDIUM, + desc: "L3, count=1 => MEDIUM", + }, + { + level: constants.IMPACT_LEVEL_3, + count: 4, + expected: constants.MIGRATION_COMPLEXITY_MEDIUM, + desc: "L3, count=4 => MEDIUM", + }, + { + level: constants.IMPACT_LEVEL_3, + count: 5, + expected: constants.MIGRATION_COMPLEXITY_HIGH, + desc: "L3, count=5 => HIGH", + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + actual := getComplexityForLevel(tc.level, tc.count) + assert.Equal(t, tc.expected, actual, + "Level=%s, Count=%d => Expected %s, Got %s", + tc.level, tc.count, tc.expected, actual, + ) + }) + } +} diff --git a/yb-voyager/cmd/root.go b/yb-voyager/cmd/root.go index 35a09cfa58..a90ddcd69e 100644 --- a/yb-voyager/cmd/root.go +++ b/yb-voyager/cmd/root.go @@ -23,6 +23,7 @@ import ( "path/filepath" "time" + "github.com/fatih/color" "github.com/google/uuid" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -45,6 +46,7 @@ var ( exportDir string schemaDir string startClean utils.BoolStr + truncateTables utils.BoolStr lockFile *lockfile.Lockfile migrationUUID uuid.UUID perfProfile utils.BoolStr @@ -62,6 +64,7 @@ Refer to docs (https://docs.yugabyte.com/preview/migrate/) for more details like PersistentPreRun: func(cmd *cobra.Command, args []string) { currentCommand = cmd.CommandPath() + if !shouldRunPersistentPreRun(cmd) { return } @@ -86,6 +89,7 @@ Refer to docs (https://docs.yugabyte.com/preview/migrate/) for more details like startTime = time.Now() log.Infof("Start time: %s\n", startTime) + // Initialize the metaDB variable only if the metaDB is already created. For example, resumption of a command. 
metaDB = initMetaDB(bulkAssessmentDir) if perfProfile { go startPprofServer() @@ -113,11 +117,25 @@ Refer to docs (https://docs.yugabyte.com/preview/migrate/) for more details like startTime = time.Now() log.Infof("Start time: %s\n", startTime) + if shouldRunExportDirInitialisedCheck(cmd) { + checkExportDirInitialised() + } + if callhome.SendDiagnostics { go sendCallhomePayloadAtIntervals() } + + // Initialize the metaDB variable only if the metaDB is already created. For example, resumption of a command. if metaDBIsCreated(exportDir) { metaDB = initMetaDB(exportDir) + msr, err := metaDB.GetMigrationStatusRecord() + if err != nil { + utils.ErrExit("get migration status record: %v", err) + } + + msrVoyagerVersionString := msr.VoyagerVersion + + detectVersionCompatibility(msrVoyagerVersionString, exportDir) } if perfProfile { @@ -145,6 +163,18 @@ Refer to docs (https://docs.yugabyte.com/preview/migrate/) for more details like }, } +func shouldRunExportDirInitialisedCheck(cmd *cobra.Command) bool { + return slices.Contains(exportDirInitialisedCheckNeededList, cmd.CommandPath()) +} + +func checkExportDirInitialised() { + // Check to ensure that this is not the first command in the migration process + isMetaDBPresent := metaDBIsCreated(exportDir) + if !isMetaDBPresent { + utils.ErrExit("Migration has not started yet. 
Run the commands in the order specified in the documentation: %s", color.BlueString("https://docs.yugabyte.com/preview/yugabyte-voyager/migrate/")) + } +} + func startPprofServer() { // Server for pprof err := http.ListenAndServe("localhost:6060", nil) @@ -160,6 +190,24 @@ func startPprofServer() { */ } +var exportDirInitialisedCheckNeededList = []string{ + "yb-voyager analyze-schema", + "yb-voyager import data", + "yb-voyager import data to target", + "yb-voyager import data to source", + "yb-voyager import data to source-replica", + "yb-voyager import data status", + "yb-voyager export data from target", + "yb-voyager export data status", + "yb-voyager cutover status", + "yb-voyager get data-migration-report", + "yb-voyager archive changes", + "yb-voyager end migration", + "yb-voyager initiate cutover to source", + "yb-voyager initiate cutover to source-replica", + "yb-voyager initiate cutover to target", +} + var noLockNeededList = []string{ "yb-voyager", "yb-voyager version", @@ -271,7 +319,7 @@ func validateExportDirFlag() { utils.ErrExit(`ERROR: required flag "export-dir" not set`) } if !utils.FileOrFolderExists(exportDir) { - utils.ErrExit("export-dir %q doesn't exists.\n", exportDir) + utils.ErrExit("export-dir doesn't exist: %q\n", exportDir) } else { if exportDir == "." 
{ fmt.Println("Note: Using current directory as export-dir") @@ -279,7 +327,7 @@ func validateExportDirFlag() { var err error exportDir, err = filepath.Abs(exportDir) if err != nil { - utils.ErrExit("Failed to get absolute path for export-dir %q: %v\n", exportDir, err) + utils.ErrExit("Failed to get absolute path for export-dir: %q: %v\n", exportDir, err) } exportDir = filepath.Clean(exportDir) } diff --git a/yb-voyager/cmd/templates/migration_assessment_report.template b/yb-voyager/cmd/templates/migration_assessment_report.template index 5e69811908..eeef96253b 100644 --- a/yb-voyager/cmd/templates/migration_assessment_report.template +++ b/yb-voyager/cmd/templates/migration_assessment_report.template @@ -35,7 +35,7 @@ th { background-color: #f2f2f2; } - tr:nth-child(even){background-color: #f9f9f9;} + .formatted_table tr:nth-child(even){background-color: #f9f9f9;} ul { padding-left: 20px; } @@ -59,6 +59,16 @@ .list_item { margin-bottom: 15px; } + pre { + width: 100%; /* Ensure the pre/code content takes full width of the container */ + word-wrap: break-word; /* Break long lines into multiple lines */ + overflow-wrap: break-word; /* Same as word-wrap but for newer browsers */ + white-space: pre-wrap; /* Preserve whitespace and allow wrapping */ + word-break: break-all; /* Prevents long words from overflowing */ + margin: 0; /* Remove default margins */ + padding: 0; /* Remove default padding */ + font-family: monospace; /* Optional: ensure a monospaced font */ + } @@ -78,6 +88,8 @@

Database Version: {{.}}

{{end}} +

Target YB Version: {{.TargetDBVersion}}

+ {{if eq .MigrationComplexity "NOT AVAILABLE"}} {{else}} @@ -89,7 +101,7 @@ - + {{range .SchemaSummary.DBObjects}} @@ -155,11 +167,16 @@ {{ end }} {{end}} + {{if ne .MigrationComplexity "NOT AVAILABLE"}} +

Migration Complexity Explanation

+

{{ .MigrationComplexityExplanation }}

+ {{end}} +

Unsupported Data Types

{{.UnsupportedDataTypesDesc}}

{{ if .UnsupportedDataTypes }}
-
Object TypeTotal CountTotal Objects Object Names
+
@@ -188,6 +205,10 @@ {{ $hasUnsupportedFeatures = true }} {{if .DisplayDDL }}

{{.FeatureName}}

+ {{ $supporterVerStr := getSupportedVersionString .MinimumVersionsFixedIn }} + {{ if $supporterVerStr }} +

Supported in Versions: {{ $supporterVerStr }}

+ {{ end }}
    {{range .Objects}} @@ -197,6 +218,10 @@
{{else}}

{{.FeatureName}}

+ {{ $supporterVerStr := getSupportedVersionString .MinimumVersionsFixedIn }} + {{ if $supporterVerStr }} +

Supported in Versions: {{ $supporterVerStr }}

+ {{ end }}
    {{range .Objects}} @@ -214,23 +239,10 @@

    No unsupported features were present among the ones assessed.

    {{end}} - {{if .Notes}} -
    -
    -
    -

    Notes

    -
      - {{range .Notes}} -
    • {{.}}
    • - {{end}} -
    -
    - {{end}} -

    Unsupported Query Constructs

    {{ if .UnsupportedQueryConstructs}}

    Source database queries not supported in YugabyteDB, identified by scanning system tables:

    -
Schema Table
+
@@ -238,15 +250,18 @@ {{ $currentType := "" }} {{ $docsLink := "" }} + {{ $supporterVerStr := "" }} {{ range $i, $construct := .UnsupportedQueryConstructs }} {{ if ne $construct.ConstructTypeName $currentType }} {{ if ne $currentType "" }} - {{ end }} {{ $docsLink = $construct.DocsLink }} + {{ $supporterVerStr = getSupportedVersionString $construct.MinimumVersionsFixedIn }} @@ -282,6 +301,71 @@

No unsupported query constructs found in the source database for target YugabyteDB.

{{ end }} +

Unsupported PL/pgSQL objects

+ {{ if .UnsupportedPlPgSqlObjects}} +

Source schema objects having unsupported statements in PL/pgSQL code block:

+
Construct Type Queries
+ {{ if $supporterVerStr }} + Supported in Versions: {{ $supporterVerStr }} + {{ end }} {{ if $docsLink }} - Link + Docs Link {{ else }} N/A {{ end }} @@ -254,6 +269,7 @@
{{ $construct.ConstructTypeName }} @@ -269,10 +285,13 @@ + {{ if $supporterVerStr }} + Supported in Versions: {{ $supporterVerStr }}
+ {{ end }} {{ if $docsLink }} - Link + Docs Link {{ else }} - Not Available + N/A {{ end }}
+ + + + + + + + {{ range .UnsupportedPlPgSqlObjects }} + + + {{ $objectsGroupByObjectType := groupByObjectType .Objects }} + {{ $numUniqueObjectNamesOfAllTypes := totalUniqueObjectNamesOfAllTypes $objectsGroupByObjectType }} + {{ $docsLink := .DocsLink }} + {{ $supportedVerStr := getSupportedVersionString .MinimumVersionsFixedIn }} + + {{ $isNextRowRequiredForObjectType := false }} + {{ range $type, $objectsByType := $objectsGroupByObjectType }} + {{ $objectGroupByObjectName := groupByObjectName $objectsByType }} + {{ $numUniqueObjectNames := numKeysInMapStringObjectInfo $objectGroupByObjectName }} + {{ if $isNextRowRequiredForObjectType }} + + {{ end }} + + {{ $isNextRowRequiredForObjectName := false }} + {{ range $name, $objectsByName := $objectGroupByObjectName }} + {{ if $isNextRowRequiredForObjectName }} + + {{ end }} + + + {{ if not $isNextRowRequiredForObjectType }} + + {{ end }} + {{ $isNextRowRequiredForObjectName = true }} + {{ $isNextRowRequiredForObjectType = true }} + + {{ end }} + {{ end }} + {{ end }} +
Feature NameObject typeObject nameStatementDetails
{{ .FeatureName }}
{{ $type }}
{{ $name }} +
+
    + {{ range $objectsByName }} +
  • {{ .SqlStatement }}
  • + {{ end }} +
+
+
+ {{ if $supportedVerStr }} + Supported in Versions: {{ $supportedVerStr }}
+ {{ end }} + {{ if $docsLink }} + Docs Link + {{ else }} + N/A + {{ end }} +
+ {{ else }} +

No unsupported PL/pgSQL objects found in the source database for target YugabyteDB.

+ {{ end }} + {{ if .MigrationCaveats}} {{ $hasMigrationCaveats := false }}

Migration caveats

@@ -290,6 +374,10 @@ {{ $hasMigrationCaveats = true }} {{if .DisplayDDL }}

{{.FeatureName}}

+ {{ $supporterVerStr := getSupportedVersionString .MinimumVersionsFixedIn }} + {{ if $supporterVerStr }} +

Supported in Versions: {{ $supporterVerStr }}

+ {{ end }}

{{.FeatureDescription}}

    @@ -300,6 +388,10 @@
{{else}}

{{.FeatureName}}

+ {{ $supporterVerStr := getSupportedVersionString .MinimumVersionsFixedIn }} + {{ if $supporterVerStr }} +

Supported in Versions: {{ $supporterVerStr }}

+ {{ end }}

{{.FeatureDescription}}

    @@ -319,6 +411,20 @@ {{end}} {{end}} + + {{if .Notes}} +
    +
    +
    +

    Notes

    +
      + {{range .Notes}} +
    • {{.}}
    • + {{end}} +
    +
    + {{end}} +
diff --git a/yb-voyager/cmd/templates/schema_analysis_report.html b/yb-voyager/cmd/templates/schema_analysis_report.html index 8d65af904e..5daff91b06 100644 --- a/yb-voyager/cmd/templates/schema_analysis_report.html +++ b/yb-voyager/cmd/templates/schema_analysis_report.html @@ -44,6 +44,16 @@ font-weight: bold; background-color: #f9f9f9; } + pre { + width: 100%; /* Ensure the pre/code content takes full width of the container */ + word-wrap: break-word; /* Break long lines into multiple lines */ + overflow-wrap: break-word; /* Same as word-wrap but for newer browsers */ + white-space: pre-wrap; /* Preserve whitespace and allow wrapping */ + word-break: break-all; /* Prevents long words from overflowing */ + margin: 0; /* Remove default margins */ + padding: 0; /* Remove default padding */ + font-family: Arial, Helvetica, sans-serif; /* Optional: ensure a monospaced font */ + } @@ -53,23 +63,19 @@

Schema Analysis Report

Migration Information

+ {{if .SchemaSummary.SchemaNames }} {{end}} - {{if eq .MigrationComplexity "NOT AVAILABLE"}} - - {{else}} - - {{end}}
Voyager Version{{ .VoyagerVersion }}
Target DB Version{{ .TargetDBVersion }}
Database Name{{ .SchemaSummary.DBName }}
Schema Name{{ join .SchemaSummary.SchemaNames ", " }}
DB Version{{ .SchemaSummary.DBVersion }}
Migration Complexity: {{ .MigrationComplexity }}

Schema Summary

- + {{ range .SchemaSummary.DBObjects }} {{ if .TotalCount }} @@ -109,8 +115,12 @@

Issue in Object {{ $issue.ObjectType }}

  • Issue Type: {{ $issue.IssueType }}
  • Object Name: {{ $issue.ObjectName }}
  • Reason: {{ $issue.Reason }}
  • -
  • SQL Statement: {{ $issue.SqlStatement }}
  • +
  • SQL Statement:
    {{ $issue.SqlStatement }}
  • File Path: {{ $issue.FilePath }} [Preview]
  • + {{ $supporterVerStr := getSupportedVersionString $issue.MinimumVersionsFixedIn }} + {{ if $supporterVerStr }} +
  • Fixed in Versions: {{ $supporterVerStr }}
  • + {{ end }} {{ if $issue.Suggestion }}
  • Suggestion: {{ $issue.Suggestion }}
  • {{ end }} diff --git a/yb-voyager/cmd/templates/schema_analysis_report.txt b/yb-voyager/cmd/templates/schema_analysis_report.txt index f031f83efd..1c02639f03 100644 --- a/yb-voyager/cmd/templates/schema_analysis_report.txt +++ b/yb-voyager/cmd/templates/schema_analysis_report.txt @@ -9,21 +9,18 @@ Voyager Version : {{ .VoyagerVersion }} Database Name : {{ .SchemaSummary.DBName }} Schema Name(s) : {{ join .SchemaSummary.SchemaNames ", " }} DB Version : {{ .SchemaSummary.DBVersion }} -{{if eq .MigrationComplexity "NOT AVAILABLE"}} -{{else}} -Migration Complexity : {{ .MigrationComplexity }} -{{end}} +Target DB Version : {{ .TargetDBVersion }} Schema Summary --------------- {{ range .SchemaSummary.DBObjects }} -Object Type : {{ .ObjectType }} - - Total Count : {{ .TotalCount }} - - Valid Count : {{ sub .TotalCount .InvalidCount }} - - Invalid Count : {{ .InvalidCount }} - - Object Names : {{ .ObjectNames }}{{ if .Details }} - - Details : {{ .Details }} +Object Type : {{ .ObjectType }} + - Total Objects : {{ .TotalCount }} + - Objects Without Issues : {{ sub .TotalCount .InvalidCount }} + - Objects With Issues : {{ .InvalidCount }} + - Object Names : {{ .ObjectNames }}{{ if .Details }} + - Details : {{ .Details }} {{ end }} {{ end }} @@ -38,6 +35,10 @@ Issues - Suggestion : {{ .Suggestion }} {{ end }}{{ if .GH }} - Github Issue : {{ .GH }}{{ end }}{{ if .DocsLink }} - Docs Link : {{ .DocsLink }}{{ end }} +{{ $supporterVerStr := getSupportedVersionString .MinimumVersionsFixedIn }} +{{ if $supporterVerStr }} + - Fixed in Versions: {{ $supporterVerStr }} +{{ end }} {{ end }}{{ else }} No issues found.{{ end }} diff --git a/yb-voyager/go.mod b/yb-voyager/go.mod index 34b3af391c..7df52aa40d 100644 --- a/yb-voyager/go.mod +++ b/yb-voyager/go.mod @@ -3,7 +3,7 @@ module github.com/yugabyte/yb-voyager/yb-voyager go 1.23.1 require ( - cloud.google.com/go/storage v1.29.0 + cloud.google.com/go/storage v1.38.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 github.com/DATA-DOG/go-sqlmock v1.5.2 @@ -11,50 +11,94 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.18.15 github.com/aws/aws-sdk-go-v2/service/s3 v1.30.5 github.com/davecgh/go-spew v1.1.1 + github.com/deckarep/golang-set/v2 v2.7.0 + github.com/docker/go-connections v0.5.0 github.com/dustin/go-humanize v1.0.1 github.com/fatih/color v1.13.0 github.com/go-sql-driver/mysql v1.7.0 github.com/godror/godror v0.30.2 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.6.0 github.com/gosuri/uilive v0.0.4 github.com/gosuri/uitable v0.0.4 - github.com/jackc/pgconn v1.13.0 - github.com/jackc/pgx/v4 v4.17.2 + github.com/hashicorp/go-version v1.7.0 + github.com/jackc/pgconn v1.14.3 + github.com/jackc/pgx/v4 v4.18.3 github.com/jackc/pgx/v5 v5.0.3 github.com/mattn/go-sqlite3 v1.14.17 github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2 github.com/mitchellh/go-ps v1.0.0 github.com/nightlyone/lockfile v1.0.0 - github.com/pganalyze/pg_query_go/v5 v5.1.0 + github.com/pganalyze/pg_query_go/v6 v6.0.0 github.com/samber/lo v1.38.1 - github.com/sirupsen/logrus v1.9.0 + github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.6.1 github.com/spf13/viper v1.13.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/tebeka/atexit v0.3.0 + github.com/testcontainers/testcontainers-go v0.34.0 + github.com/testcontainers/testcontainers-go/modules/yugabytedb v0.34.0 github.com/vbauerster/mpb/v8 v8.4.0 gocloud.dev v0.29.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa golang.org/x/term v0.24.0 - google.golang.org/api v0.118.0 + google.golang.org/api v0.169.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 + gotest.tools v2.2.0+incompatible ) require ( - github.com/fergusstrange/embedded-postgres v1.29.0 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + 
github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/containerd/containerd v1.7.18 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v27.1.1+incompatible // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/jackc/puddle v1.3.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/lib/pq v1.10.9 // indirect - github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect + github.com/klauspost/compress v1.17.4 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + 
go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.9.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect ) require ( - cloud.google.com/go v0.110.2 // indirect - cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go v0.112.1 // indirect + cloud.google.com/go/compute v1.25.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.13.0 // indirect + cloud.google.com/go/iam v1.1.6 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -85,23 +129,23 @@ require ( github.com/godror/knownpb v0.1.0 // indirect github.com/golang-jwt/jwt/v4 v4.4.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/s2a-go v0.1.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-cmp v0.6.0 + github.com/google/s2a-go v0.1.7 // indirect github.com/google/wire v0.5.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.8.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pglogrepl v0.0.0-20231111135425-1627ab1b5780 github.com/jackc/pgpassfile v1.0.0 // 
indirect - github.com/jackc/pgproto3/v2 v2.3.1 // indirect - github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.12.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/magiconair/properties v1.8.6 + github.com/magiconair/properties v1.8.7 github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect @@ -119,17 +163,17 @@ require ( github.com/spf13/pflag v1.0.5 github.com/subosito/gotenv v1.4.1 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.23.0 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.15.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - google.golang.org/grpc v1.55.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/grpc v1.64.1 // indirect + google.golang.org/protobuf v1.33.0 gopkg.in/ini.v1 v1.67.0 gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/yb-voyager/go.sum b/yb-voyager/go.sum index e948346735..1c06c76f90 100644 --- a/yb-voyager/go.sum +++ b/yb-voyager/go.sum @@ -39,8 +39,8 @@ 
cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRY cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= -cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= -cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= @@ -124,8 +124,8 @@ cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARy cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= +cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -218,8 +218,8 
@@ cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHD cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM= -cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= +cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= @@ -240,8 +240,6 @@ cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9 cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.0/go.mod h1:eF3Qsw58iX/bkKtVjMTYpH0LRjQ2goDkjkNQTlzq/ZM= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= @@ -369,8 +367,9 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= 
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storage v1.38.0 h1:Az68ZRGlnNTpIBbLjSMIV2BDcwwXYlRlQzis0llkpJg= +cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= @@ -417,8 +416,12 @@ code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd contrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= contrib.go.opencensus.io/exporter/stackdriver v0.13.14/go.mod h1:5pSSGY0Bhuk7waTHuDf4aQ8D2DrhgETRo9fy6k3Xlzc= contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/azure-amqp-common-go/v3 v3.2.3/go.mod 
h1:7rPmbSfszeovxGfc5fSAXE4ehlXQZHpMja2OtxC2Tas= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v63.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -448,6 +451,7 @@ github.com/Azure/go-amqp v0.17.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fw github.com/Azure/go-amqp v0.18.1/go.mod h1:+bg0x3ce5+Q3ahCEXnCsGG3ETpDQe3MEVnOuT2ywPwc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= @@ -494,6 +498,8 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod 
h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -641,6 +647,8 @@ github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRt github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= @@ -725,6 +733,8 @@ github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTV github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= +github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao= +github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -753,9 +763,13 @@ github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6T 
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= @@ -803,12 +817,16 @@ github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man/v2 
v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= @@ -818,6 +836,8 @@ github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjI github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set/v2 v2.7.0 h1:gIloKvD7yH2oip4VLhsv3JyLLFnC0Y2mlusgcvJYW5k= +github.com/deckarep/golang-set/v2 v2.7.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= @@ -827,6 +847,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/dgryski/go-sip13 
v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.78.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs= github.com/digitalocean/godo v1.95.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= @@ -839,13 +861,18 @@ github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.23+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= +github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics 
v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= @@ -890,8 +917,8 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fergusstrange/embedded-postgres v1.29.0 h1:Uv8hdhoiaNMuH0w8UuGXDHr60VoAQPFdgx7Qf3bzXJM= -github.com/fergusstrange/embedded-postgres v1.29.0/go.mod h1:t/MLs0h9ukYM6FSt99R7InCHs1nW0ordoVCcnzmpTYw= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -938,8 +965,13 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= @@ -1043,6 +1075,7 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= @@ -1089,11 +1122,12 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf 
v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -1154,23 +1188,25 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20220318212150-b2ab0324ddda/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.0 h1:3Qm0liEiCErViKERO2Su5wp+9PfMRiuS6XB5FvpKnYQ= -github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/subcommands v1.0.1/go.mod 
h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8= github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1181,8 +1217,8 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc= -github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= +github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= @@ -1217,10 +1253,15 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w= github.com/grpc-ecosystem/grpc-gateway/v2 
v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hanwen/go-fuse/v2 v2.2.0/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= @@ -1260,6 +1301,8 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -1316,8 +1359,9 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod 
h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pglogrepl v0.0.0-20231111135425-1627ab1b5780 h1:pNK2AKKIRC1MMMvpa6UiNtdtOebpiIloX7q2JZDkfsk= @@ -1335,22 +1379,26 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod 
h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= github.com/jackc/pgx/v5 v5.0.3 h1:4flM5ecR/555F0EcnjdaZa6MhBU+nr0QbZIo5vaKjuM= github.com/jackc/pgx/v5 v5.0.3/go.mod h1:JBbvW3Hdw77jKl9uJrEDATUZIFM2VFPzRq4RWIhkF4o= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= @@ -1403,6 +1451,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.4/go.mod 
h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1430,19 +1480,21 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/linode/linodego v1.4.0/go.mod h1:PVsRxSlOiJyvG4/scTszpmZDTdgS+to3X6eS8pRrWI8= github.com/linode/linodego v1.12.0/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.6.0/go.mod 
h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1520,17 +1572,27 @@ github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0/go.mod 
h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -1539,6 +1601,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod 
h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1606,11 +1669,14 @@ github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod 
h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1648,8 +1714,8 @@ github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwb github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pganalyze/pg_query_go/v5 v5.1.0 h1:MlxQqHZnvA3cbRQYyIrjxEjzo560P6MyTgtlaf3pmXg= -github.com/pganalyze/pg_query_go/v5 v5.1.0/go.mod h1:FsglvxidZsVN+Ltw3Ai6nTgPVcK2BPukH3jCDEqc1Ug= +github.com/pganalyze/pg_query_go/v6 v6.0.0 h1:in6RkR/apfqlAtvqgDxd4Y4o87a5Pr8fkKDB4DrDo2c= +github.com/pganalyze/pg_query_go/v6 v6.0.0/go.mod h1:nvTHIuoud6e1SfrUaFwHqT0i4b5Nr+1rPWVds3B5+50= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= @@ -1667,6 +1733,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod 
h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI= @@ -1759,7 +1827,13 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.0/go.mod h1:xYtyGBC5Q3kzCNyJg/SjgNpfAa2kvmgA0i5+lQso8x0= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -1775,8 +1849,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1827,6 +1901,8 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1838,8 +1914,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= @@ -1850,7 +1927,15 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tebeka/atexit v0.3.0 h1:jleL99H7Ywt80oJKR+VWmJNnezcCOG0CuzcN3CIpsdI= github.com/tebeka/atexit v0.3.0/go.mod h1:WJmSUSmMT7WoR7etUOaGBVXk+f5/ZJ+67qwuedq7Fbs= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/testcontainers/testcontainers-go v0.34.0 h1:5fbgF0vIN5u+nD3IWabQwRybuB4GY8G2HHgCkbMzMHo= +github.com/testcontainers/testcontainers-go v0.34.0/go.mod h1:6P/kMkQe8yqPHfPWNulFGdFHTD8HB2vLq/231xY2iPQ= +github.com/testcontainers/testcontainers-go/modules/yugabytedb v0.34.0 h1:9wIqSZJwBr4s8Q7R3S+rhe1J2zqHHxH0S1bN17ld+CI= +github.com/testcontainers/testcontainers-go/modules/yugabytedb v0.34.0/go.mod h1:bgHrbdYjpNPSstf8HfxChUxc6XztBCSoqDR0syb1Oeg= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1884,12 +1969,12 @@ github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgk github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= -github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yugabyte/gocql v1.6.0-yb-1 h1:3anNiHsJwKQ8Dn7RdmkTEuIzV1l7e9QJZ8wkOZ87ELg= +github.com/yugabyte/gocql v1.6.0-yb-1/go.mod h1:LAokR6+vevDCrTxk52U7p6ki+4qELu4XU7JUGYa2O2M= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1897,6 +1982,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec 
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -1936,15 +2023,22 @@ go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE= go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.1/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40= @@ -1952,21 +2046,29 @@ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/L go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.1/go.mod h1:YJ/JbY5ag/tSQFXzH3mtDmHqzF3aFn3DI/aB1n7pt4w= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.1/go.mod h1:UJJXJj0rltNIemDMwkOJyggsvyMG9QHfJeFH0HS5JjM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.1/go.mod h1:DAKwdo06hFLc0U88O10x4xnb5sc7dDRDqRuiN+io8JE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2/go.mod h1:GZWSQQky8AgdJj50r1KJm8oiQiIPaAX7uZCFQX9GzC8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys= go.opentelemetry.io/otel/sdk v1.11.2/go.mod 
h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= @@ -1975,11 +2077,15 @@ go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qE go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk= go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.opentelemetry.io/proto/otlp v0.12.1/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -2052,8 
+2158,8 @@ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2192,8 +2298,8 @@ golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmL golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2225,8 
+2331,8 @@ golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2327,6 +2433,7 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2398,6 +2505,9 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2426,8 +2536,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2441,6 +2551,8 @@ golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2538,8 +2650,9 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -2605,16 +2718,17 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= 
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.118.0 h1:FNfHq9Z2GKULxu7cEhCaB0wWQHg43UpomrrN+24ZRdE= -google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= +google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= +google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2751,8 +2865,12 @@ google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= 
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2797,8 +2915,8 @@ google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc v1.52.1/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= 
+google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2815,8 +2933,9 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2832,6 +2951,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -2866,9 +2986,12 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/yb-voyager/src/adaptiveparallelism/adaptive_parallelism_test.go b/yb-voyager/src/adaptiveparallelism/adaptive_parallelism_test.go index 7b4438d9e9..139b443f4d 100644 --- a/yb-voyager/src/adaptiveparallelism/adaptive_parallelism_test.go +++ b/yb-voyager/src/adaptiveparallelism/adaptive_parallelism_test.go @@ -1,3 +1,5 @@ +//go:build unit + /* Copyright (c) YugabyteDB, Inc. 
diff --git a/yb-voyager/src/callhome/diagnostics.go b/yb-voyager/src/callhome/diagnostics.go index 10cfbbbefa..7bf14d6d8b 100644 --- a/yb-voyager/src/callhome/diagnostics.go +++ b/yb-voyager/src/callhome/diagnostics.go @@ -30,6 +30,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" ) // call-home json formats @@ -57,7 +58,7 @@ CREATE TABLE diagnostics ( migration_type TEXT, time_taken_sec int, status TEXT, - host_ip character varying 255 -- set by the callhome service + host_ip character varying (255), -- set in callhome service PRIMARY KEY (migration_uuid, migration_phase, collected_at) ); @@ -76,6 +77,8 @@ type Payload struct { Status string `json:"status"` } +// SHOULD NOT REMOVE THESE (host, db_type, db_version, total_db_size_bytes) FIELDS of SourceDBDetails as parsing these specifically here +// https://github.com/yugabyte/yugabyte-growth/blob/ad5df306c50c05136df77cd6548a1091ae577046/diagnostics_v2/main.py#L549 type SourceDBDetails struct { Host string `json:"host"` //keeping it empty for now, as field is parsed in big query app DBType string `json:"db_type"` @@ -84,6 +87,8 @@ type SourceDBDetails struct { Role string `json:"role,omitempty"` //for differentiating replica details } +// SHOULD NOT REMOVE THESE (host, db_version, node_count, total_cores) FIELDS of TargetDBDetails as parsing these specifically here +// https://github.com/yugabyte/yugabyte-growth/blob/ad5df306c50c05136df77cd6548a1091ae577046/diagnostics_v2/main.py#L556 type TargetDBDetails struct { Host string `json:"host"` DBVersion string `json:"db_version"` @@ -92,26 +97,31 @@ type TargetDBDetails struct { } type UnsupportedFeature struct { - FeatureName string `json:"FeatureName"` - ObjectCount int `json:"ObjectCount"` + FeatureName string `json:"FeatureName"` + Objects []string `json:"Objects,omitempty"` + ObjectCount int `json:"ObjectCount"` + TotalOccurrences int 
`json:"TotalOccurrences"` } type AssessMigrationPhasePayload struct { - MigrationComplexity string `json:"migration_complexity"` - UnsupportedFeatures string `json:"unsupported_features"` - UnsupportedDatatypes string `json:"unsupported_datatypes"` - UnsupportedQueryConstructs string `json:"unsupported_query_constructs"` - MigrationCaveats string `json:"migration_caveats"` - Error string `json:"error,omitempty"` // Removed it for now, TODO - TableSizingStats string `json:"table_sizing_stats"` - IndexSizingStats string `json:"index_sizing_stats"` - SchemaSummary string `json:"schema_summary"` - SourceConnectivity bool `json:"source_connectivity"` - IopsInterval int64 `json:"iops_interval"` + TargetDBVersion *ybversion.YBVersion `json:"target_db_version"` + MigrationComplexity string `json:"migration_complexity"` + UnsupportedFeatures string `json:"unsupported_features"` + UnsupportedDatatypes string `json:"unsupported_datatypes"` + UnsupportedQueryConstructs string `json:"unsupported_query_constructs"` + MigrationCaveats string `json:"migration_caveats"` + UnsupportedPlPgSqlObjects string `json:"unsupported_plpgsql_objects"` + Error string `json:"error"` + TableSizingStats string `json:"table_sizing_stats"` + IndexSizingStats string `json:"index_sizing_stats"` + SchemaSummary string `json:"schema_summary"` + SourceConnectivity bool `json:"source_connectivity"` + IopsInterval int64 `json:"iops_interval"` } type AssessMigrationBulkPhasePayload struct { - FleetConfigCount int `json:"fleet_config_count"` // Not storing any source info just the count of db configs passed to bulk cmd + FleetConfigCount int `json:"fleet_config_count"` // Not storing any source info just the count of db configs passed to bulk cmd + Error string `json:"error"` } type ObjectSizingStats struct { @@ -123,15 +133,20 @@ type ObjectSizingStats struct { } type ExportSchemaPhasePayload struct { - StartClean bool `json:"start_clean"` - AppliedRecommendations bool `json:"applied_recommendations"` - 
UseOrafce bool `json:"use_orafce"` - CommentsOnObjects bool `json:"comments_on_objects"` + StartClean bool `json:"start_clean"` + AppliedRecommendations bool `json:"applied_recommendations"` + UseOrafce bool `json:"use_orafce"` + CommentsOnObjects bool `json:"comments_on_objects"` + Error string `json:"error"` } +// SHOULD NOT REMOVE THESE TWO (issues, database_objects) FIELDS of AnalyzePhasePayload as parsing these specifically here +// https://github.com/yugabyte/yugabyte-growth/blob/ad5df306c50c05136df77cd6548a1091ae577046/diagnostics_v2/main.py#L563 type AnalyzePhasePayload struct { - Issues string `json:"issues"` - DatabaseObjects string `json:"database_objects"` + TargetDBVersion *ybversion.YBVersion `json:"target_db_version"` + Issues string `json:"issues"` + DatabaseObjects string `json:"database_objects"` + Error string `json:"error"` } type ExportDataPhasePayload struct { ParallelJobs int64 `json:"parallel_jobs"` @@ -144,16 +159,18 @@ type ExportDataPhasePayload struct { TotalExportedEvents int64 `json:"total_exported_events,omitempty"` EventsExportRate int64 `json:"events_export_rate_3m,omitempty"` LiveWorkflowType string `json:"live_workflow_type,omitempty"` + Error string `json:"error"` } type ImportSchemaPhasePayload struct { - ContinueOnError bool `json:"continue_on_error"` - EnableOrafce bool `json:"enable_orafce"` - IgnoreExist bool `json:"ignore_exist"` - RefreshMviews bool `json:"refresh_mviews"` - ErrorCount int `json:"errors"` // changing it to count of errors only - PostSnapshotImport bool `json:"post_snapshot_import"` - StartClean bool `json:"start_clean"` + ContinueOnError bool `json:"continue_on_error"` + EnableOrafce bool `json:"enable_orafce"` + IgnoreExist bool `json:"ignore_exist"` + RefreshMviews bool `json:"refresh_mviews"` + ErrorCount int `json:"errors"` // changing it to count of errors only + PostSnapshotImport bool `json:"post_snapshot_import"` + StartClean bool `json:"start_clean"` + Error string `json:"error"` } type 
ImportDataPhasePayload struct { @@ -167,6 +184,7 @@ type ImportDataPhasePayload struct { EventsImportRate int64 `json:"events_import_rate_3m,omitempty"` LiveWorkflowType string `json:"live_workflow_type,omitempty"` EnableUpsert bool `json:"enable_upsert"` + Error string `json:"error"` } type ImportDataFilePhasePayload struct { @@ -176,6 +194,7 @@ type ImportDataFilePhasePayload struct { FileStorageType string `json:"file_storage_type"` StartClean bool `json:"start_clean"` DataFileParameters string `json:"data_file_parameters"` + Error string `json:"error"` } type DataFileParameters struct { @@ -188,10 +207,11 @@ type DataFileParameters struct { } type EndMigrationPhasePayload struct { - BackupDataFiles bool `json:"backup_data_files"` - BackupLogFiles bool `json:"backup_log_files"` - BackupSchemaFiles bool `json:"backup_schema_files"` - SaveMigrationReports bool `json:"save_migration_reports"` + BackupDataFiles bool `json:"backup_data_files"` + BackupLogFiles bool `json:"backup_log_files"` + BackupSchemaFiles bool `json:"backup_schema_files"` + SaveMigrationReports bool `json:"save_migration_reports"` + Error string `json:"error"` } var DoNotStoreFlags = []string{ @@ -271,3 +291,12 @@ func SendPayload(payload *Payload) error { return nil } + +// We want to ensure that no user-specific information is sent to the call-home service. +// Therefore, we only send the segment of the error message before the first ":" as that is the generic error message. +// Note: This is a temporary solution. A better solution would be to have +// properly structured errors and only send the generic error message to callhome. +func SanitizeErrorMsg(errorMsg string) string { + return "" // For now, returning empty string. After thorough testing, we can return the specific error message. 
+ // return strings.Split(errorMsg, ":")[0] +} diff --git a/yb-voyager/src/callhome/diagnostics_test.go b/yb-voyager/src/callhome/diagnostics_test.go new file mode 100644 index 0000000000..b20b6a27de --- /dev/null +++ b/yb-voyager/src/callhome/diagnostics_test.go @@ -0,0 +1,232 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package callhome + +import ( + "reflect" + "testing" + + "github.com/google/uuid" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +func TestCallhomeStructs(t *testing.T) { + + tests := []struct { + name string + actualType reflect.Type + expectedType interface{} + }{ + { + name: "Validate Payload Struct Definition", + actualType: reflect.TypeOf(Payload{}), + expectedType: struct { + MigrationUUID uuid.UUID `json:"migration_uuid"` + PhaseStartTime string `json:"phase_start_time"` + CollectedAt string `json:"collected_at"` + SourceDBDetails string `json:"source_db_details"` + TargetDBDetails string `json:"target_db_details"` + YBVoyagerVersion string `json:"yb_voyager_version"` + MigrationPhase string `json:"migration_phase"` + PhasePayload string `json:"phase_payload"` + MigrationType string `json:"migration_type"` + TimeTakenSec int `json:"time_taken_sec"` + Status string `json:"status"` + }{}, + }, + { + name: "Validate SourceDBDetails Struct Definition", + actualType: reflect.TypeOf(SourceDBDetails{}), + expectedType: 
struct { + Host string `json:"host"` + DBType string `json:"db_type"` + DBVersion string `json:"db_version"` + DBSize int64 `json:"total_db_size_bytes"` + Role string `json:"role,omitempty"` + }{}, + }, + { + name: "Validate TargetDBDetails Struct Definition", + actualType: reflect.TypeOf(TargetDBDetails{}), + expectedType: struct { + Host string `json:"host"` + DBVersion string `json:"db_version"` + NodeCount int `json:"node_count"` + Cores int `json:"total_cores"` + }{}, + }, + { + name: "Validate UnsupportedFeature Struct Definition", + actualType: reflect.TypeOf(UnsupportedFeature{}), + expectedType: struct { + FeatureName string `json:"FeatureName"` + Objects []string `json:"Objects,omitempty"` + ObjectCount int `json:"ObjectCount"` + TotalOccurrences int `json:"TotalOccurrences"` + }{}, + }, + { + name: "Validate AssessMigrationPhasePayload Struct Definition", + actualType: reflect.TypeOf(AssessMigrationPhasePayload{}), + expectedType: struct { + TargetDBVersion *ybversion.YBVersion `json:"target_db_version"` + MigrationComplexity string `json:"migration_complexity"` + UnsupportedFeatures string `json:"unsupported_features"` + UnsupportedDatatypes string `json:"unsupported_datatypes"` + UnsupportedQueryConstructs string `json:"unsupported_query_constructs"` + MigrationCaveats string `json:"migration_caveats"` + UnsupportedPlPgSqlObjects string `json:"unsupported_plpgsql_objects"` + Error string `json:"error"` + TableSizingStats string `json:"table_sizing_stats"` + IndexSizingStats string `json:"index_sizing_stats"` + SchemaSummary string `json:"schema_summary"` + SourceConnectivity bool `json:"source_connectivity"` + IopsInterval int64 `json:"iops_interval"` + }{}, + }, + { + name: "Validate AssessMigrationBulkPhasePayload Struct Definition", + actualType: reflect.TypeOf(AssessMigrationBulkPhasePayload{}), + expectedType: struct { + FleetConfigCount int `json:"fleet_config_count"` + Error string `json:"error"` + }{}, + }, + { + name: "Validate 
ObjectSizingStats Struct Definition", + actualType: reflect.TypeOf(ObjectSizingStats{}), + expectedType: struct { + SchemaName string `json:"schema_name,omitempty"` + ObjectName string `json:"object_name"` + ReadsPerSecond int64 `json:"reads_per_second"` + WritesPerSecond int64 `json:"writes_per_second"` + SizeInBytes int64 `json:"size_in_bytes"` + }{}, + }, + { + name: "Validate ExportSchemaPhasePayload Struct Definition", + actualType: reflect.TypeOf(ExportSchemaPhasePayload{}), + expectedType: struct { + StartClean bool `json:"start_clean"` + AppliedRecommendations bool `json:"applied_recommendations"` + UseOrafce bool `json:"use_orafce"` + CommentsOnObjects bool `json:"comments_on_objects"` + Error string `json:"error"` + }{}, + }, + { + name: "Validate AnalyzePhasePayload Struct Definition", + actualType: reflect.TypeOf(AnalyzePhasePayload{}), + expectedType: struct { + TargetDBVersion *ybversion.YBVersion `json:"target_db_version"` + Issues string `json:"issues"` + DatabaseObjects string `json:"database_objects"` + Error string `json:"error"` + }{}, + }, + { + name: "Validate ExportDataPhasePayload Struct Definition", + actualType: reflect.TypeOf(ExportDataPhasePayload{}), + expectedType: struct { + ParallelJobs int64 `json:"parallel_jobs"` + TotalRows int64 `json:"total_rows_exported"` + LargestTableRows int64 `json:"largest_table_rows_exported"` + StartClean bool `json:"start_clean"` + ExportSnapshotMechanism string `json:"export_snapshot_mechanism,omitempty"` + Phase string `json:"phase,omitempty"` + TotalExportedEvents int64 `json:"total_exported_events,omitempty"` + EventsExportRate int64 `json:"events_export_rate_3m,omitempty"` + LiveWorkflowType string `json:"live_workflow_type,omitempty"` + Error string `json:"error"` + }{}, + }, + { + name: "Validate ImportSchemaPhasePayload Struct Definition", + actualType: reflect.TypeOf(ImportSchemaPhasePayload{}), + expectedType: struct { + ContinueOnError bool `json:"continue_on_error"` + EnableOrafce bool 
`json:"enable_orafce"` + IgnoreExist bool `json:"ignore_exist"` + RefreshMviews bool `json:"refresh_mviews"` + ErrorCount int `json:"errors"` + PostSnapshotImport bool `json:"post_snapshot_import"` + StartClean bool `json:"start_clean"` + Error string `json:"error"` + }{}, + }, + { + name: "Validate ImportDataPhasePayload Struct Definition", + actualType: reflect.TypeOf(ImportDataPhasePayload{}), + expectedType: struct { + ParallelJobs int64 `json:"parallel_jobs"` + TotalRows int64 `json:"total_rows_imported"` + LargestTableRows int64 `json:"largest_table_rows_imported"` + StartClean bool `json:"start_clean"` + Phase string `json:"phase,omitempty"` + TotalImportedEvents int64 `json:"total_imported_events,omitempty"` + EventsImportRate int64 `json:"events_import_rate_3m,omitempty"` + LiveWorkflowType string `json:"live_workflow_type,omitempty"` + EnableUpsert bool `json:"enable_upsert"` + Error string `json:"error"` + }{}, + }, + { + name: "Validate ImportDataFilePhasePayload Struct Definition", + actualType: reflect.TypeOf(ImportDataFilePhasePayload{}), + expectedType: struct { + ParallelJobs int64 `json:"parallel_jobs"` + TotalSize int64 `json:"total_size_imported"` + LargestTableSize int64 `json:"largest_table_size_imported"` + FileStorageType string `json:"file_storage_type"` + StartClean bool `json:"start_clean"` + DataFileParameters string `json:"data_file_parameters"` + Error string `json:"error"` + }{}, + }, + { + name: "Validate DataFileParameters Struct Definition", + actualType: reflect.TypeOf(DataFileParameters{}), + expectedType: struct { + FileFormat string `json:"FileFormat"` + Delimiter string `json:"Delimiter"` + HasHeader bool `json:"HasHeader"` + QuoteChar string `json:"QuoteChar,omitempty"` + EscapeChar string `json:"EscapeChar,omitempty"` + NullString string `json:"NullString,omitempty"` + }{}, + }, + { + name: "Validate EndMigrationPhasePayload Struct Definition", + actualType: reflect.TypeOf(EndMigrationPhasePayload{}), + expectedType: struct 
{ + BackupDataFiles bool `json:"backup_data_files"` + BackupLogFiles bool `json:"backup_log_files"` + BackupSchemaFiles bool `json:"backup_schema_files"` + SaveMigrationReports bool `json:"save_migration_reports"` + Error string `json:"error"` + }{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testutils.CompareStructs(t, tt.actualType, reflect.TypeOf(tt.expectedType), tt.name) + }) + } +} diff --git a/yb-voyager/src/constants/constants.go b/yb-voyager/src/constants/constants.go new file mode 100644 index 0000000000..c9104002ee --- /dev/null +++ b/yb-voyager/src/constants/constants.go @@ -0,0 +1,47 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package constants + +const ( + // Database Object types + TABLE = "table" + FUNCTION = "function" + COLUMN = "column" + + // Source DB Types + YUGABYTEDB = "yugabytedb" + POSTGRESQL = "postgresql" + ORACLE = "oracle" + MYSQL = "mysql" + + // AssessmentIssue Categoes - used by YugabyteD payload and Migration Complexity Explainability + // TODO: soon to be renamed as SCHEMA, SCHEMA_PLPGSQL, DML_QUERY, MIGRATION_CAVEAT, "DATATYPE" + FEATURE = "feature" + DATATYPE = "datatype" + QUERY_CONSTRUCT = "query_construct" + MIGRATION_CAVEATS = "migration_caveats" + PLPGSQL_OBJECT = "plpgsql_object" + + // constants for the Impact Buckets + IMPACT_LEVEL_1 = "LEVEL_1" // Represents minimal impact like only the schema ddl + IMPACT_LEVEL_2 = "LEVEL_2" // Represents moderate impact like dml queries which might impact a lot of implementation/assumption in app layer + IMPACT_LEVEL_3 = "LEVEL_3" // Represent significant impact like TABLE INHERITANCE, which doesn't have any simple workaround but can impact multiple objects/apps + + // constants for migration complexity + MIGRATION_COMPLEXITY_LOW = "LOW" + MIGRATION_COMPLEXITY_MEDIUM = "MEDIUM" + MIGRATION_COMPLEXITY_HIGH = "HIGH" +) diff --git a/yb-voyager/src/cp/yugabyted/yugabyted_test.go b/yb-voyager/src/cp/yugabyted/yugabyted_test.go new file mode 100644 index 0000000000..8e94839d06 --- /dev/null +++ b/yb-voyager/src/cp/yugabyted/yugabyted_test.go @@ -0,0 +1,195 @@ +//go:build integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package yugabyted + +import ( + "context" + "database/sql" + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v4/pgxpool" + _ "github.com/jackc/pgx/v5/stdlib" + "github.com/stretchr/testify/assert" + controlPlane "github.com/yugabyte/yb-voyager/yb-voyager/src/cp" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" + testcontainers "github.com/yugabyte/yb-voyager/yb-voyager/test/containers" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +func TestYugabyteDTableSchema(t *testing.T) { + ctx := context.Background() + + yugabyteDBContainer := testcontainers.NewTestContainer("yugabytedb", nil) + err := yugabyteDBContainer.Start(ctx) + if err != nil { + utils.ErrExit("Failed to start yugabytedb container: %v", err) + } + defer testcontainers.TerminateAllContainers() + assert.NoError(t, err, "Failed to start YugabyteDB container") + + // Connect to the database + dsn := yugabyteDBContainer.GetConnectionString() + db, err := sql.Open("pgx", dsn) + assert.NoError(t, err) + defer db.Close() + + // Wait for the database to be ready + err = testutils.WaitForDBToBeReady(db) + assert.NoError(t, err) + // Export the database connection string to env variable YUGABYTED_DB_CONN_STRING + err = os.Setenv("YUGABYTED_DB_CONN_STRING", dsn) + + exportDir := filepath.Join(os.TempDir(), "yugabyted") + + // Create a temporary export directory for testing + err = os.MkdirAll(exportDir, 0755) + assert.NoError(t, err, "Failed to create temporary export directory") + // Ensure the directory is removed after the test + defer func() { + err := os.RemoveAll(exportDir) + assert.NoError(t, err, "Failed to remove temporary export directory") + }() + + controlPlane := New(exportDir) + controlPlane.eventChan = make(chan MigrationEvent, 100) + controlPlane.rowCountUpdateEventChan = 
make(chan []VisualizerTableMetrics, 200) + + err = controlPlane.connect() + assert.NoError(t, err, "Failed to connect to YugabyteDB") + + err = controlPlane.setupDatabase() + assert.NoError(t, err, "Failed to setup YugabyteDB database") + + expectedTables := map[string]map[string]testutils.ColumnPropertiesPG{ + QUALIFIED_YUGABYTED_METADATA_TABLE_NAME: { + "migration_uuid": {Type: "uuid", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "migration_phase": {Type: "integer", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "invocation_sequence": {Type: "integer", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "migration_dir": {Type: "character varying", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "database_name": {Type: "character varying", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "schema_name": {Type: "character varying", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "payload": {Type: "text", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "complexity": {Type: "character varying", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "db_type": {Type: "character varying", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "status": {Type: "character varying", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "invocation_timestamp": {Type: "timestamp with time zone", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "host_ip": {Type: "character varying", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "port": {Type: "integer", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "db_version": {Type: "character varying", IsNullable: "YES", Default: sql.NullString{Valid: false}, 
IsPrimary: false}, + "voyager_info": {Type: "character varying", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + }, + YUGABYTED_TABLE_METRICS_TABLE_NAME: { + "migration_uuid": {Type: "uuid", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "table_name": {Type: "character varying", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "schema_name": {Type: "character varying", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "migration_phase": {Type: "integer", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "status": {Type: "integer", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "count_live_rows": {Type: "integer", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "count_total_rows": {Type: "integer", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "invocation_timestamp": {Type: "timestamp with time zone", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + }, + } + + // Validate the schema and tables + t.Run("Check all the expected tables and no extra tables", func(t *testing.T) { + testutils.CheckTableExistencePG(t, db, VISUALIZER_METADATA_SCHEMA, expectedTables) + }) + + // Validate columns for each table + for tableName, expectedColumns := range expectedTables { + t.Run(fmt.Sprintf("Check columns for %s table", tableName), func(t *testing.T) { + table := strings.Split(tableName, ".")[1] + testutils.CheckTableStructurePG(t, db, VISUALIZER_METADATA_SCHEMA, table, expectedColumns) + }) + } +} + +func TestYugabyteDStructs(t *testing.T) { + // Test the structs used in YugabyteD + + expectedVoyagerInstance := struct { + IP string + OperatingSystem string + DiskSpaceAvailable uint64 + ExportDirectory string + }{} + + t.Run("Validate VoyagerInstance Struct Definition", func(t *testing.T) { + 
testutils.CompareStructs(t, reflect.TypeOf(controlPlane.VoyagerInstance{}), reflect.TypeOf(expectedVoyagerInstance), "VoyagerInstance") + }) + + expectedMigrationEvent := struct { + MigrationUUID uuid.UUID `json:"migration_uuid"` + MigrationPhase int `json:"migration_phase"` + InvocationSequence int `json:"invocation_sequence"` + MigrationDirectory string `json:"migration_dir"` + DatabaseName string `json:"database_name"` + SchemaName string `json:"schema_name"` + DBIP string `json:"db_ip"` + Port int `json:"port"` + DBVersion string `json:"db_version"` + Payload string `json:"payload"` + VoyagerInfo string `json:"voyager_info"` + DBType string `json:"db_type"` + Status string `json:"status"` + InvocationTimestamp string `json:"invocation_timestamp"` + }{} + + t.Run("Validate MigrationEvent Struct Definition", func(t *testing.T) { + testutils.CompareStructs(t, reflect.TypeOf(MigrationEvent{}), reflect.TypeOf(expectedMigrationEvent), "MigrationEvent") + }) + + expectedVisualizerTableMetrics := struct { + MigrationUUID uuid.UUID `json:"migration_uuid"` + TableName string `json:"table_name"` + Schema string `json:"schema_name"` + MigrationPhase int `json:"migration_phase"` + Status int `json:"status"` + CountLiveRows int64 `json:"count_live_rows"` + CountTotalRows int64 `json:"count_total_rows"` + InvocationTimestamp string `json:"invocation_timestamp"` + }{} + + t.Run("Validate VisualizerTableMetrics Struct Definition", func(t *testing.T) { + testutils.CompareStructs(t, reflect.TypeOf(VisualizerTableMetrics{}), reflect.TypeOf(expectedVisualizerTableMetrics), "VisualizerTableMetrics") + }) + + expectedYugabyteD := struct { + sync.Mutex + migrationDirectory string + voyagerInfo *controlPlane.VoyagerInstance + waitGroup sync.WaitGroup + eventChan chan (MigrationEvent) + rowCountUpdateEventChan chan ([]VisualizerTableMetrics) + connPool *pgxpool.Pool + lastRowCountUpdate map[string]time.Time + latestInvocationSequence int + }{} + + t.Run("Validate YugabyteD Struct 
Definition", func(t *testing.T) { + testutils.CompareStructs(t, reflect.TypeOf(&YugabyteD{}).Elem(), reflect.TypeOf(&expectedYugabyteD).Elem(), "YugabyteD") + }) +} diff --git a/yb-voyager/src/datafile/descriptor_test.go b/yb-voyager/src/datafile/descriptor_test.go new file mode 100644 index 0000000000..7c8b5eace3 --- /dev/null +++ b/yb-voyager/src/datafile/descriptor_test.go @@ -0,0 +1,129 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datafile + +import ( + "os" + "path/filepath" + "reflect" + "testing" + + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +func TestDescriptorStructs(t *testing.T) { + // Define the expected structure for FileEntry + expectedFileEntry := struct { + FilePath string `json:"FilePath"` + TableName string `json:"TableName"` + RowCount int64 `json:"RowCount"` + FileSize int64 `json:"FileSize"` + }{} + + // Define the expected structure for Descriptor + expectedDescriptor := struct { + FileFormat string `json:"FileFormat"` + Delimiter string `json:"Delimiter"` + HasHeader bool `json:"HasHeader"` + ExportDir string `json:"-"` + QuoteChar byte `json:"QuoteChar,omitempty"` + EscapeChar byte `json:"EscapeChar,omitempty"` + NullString string `json:"NullString,omitempty"` + DataFileList []*FileEntry `json:"FileList"` + TableNameToExportedColumns map[string][]string `json:"TableNameToExportedColumns"` + }{} + + t.Run("Validate FileEntry Struct Definition", func(t *testing.T) 
{ + testutils.CompareStructs(t, reflect.TypeOf(FileEntry{}), reflect.TypeOf(expectedFileEntry), "FileEntry") + }) + + t.Run("Validate Descriptor Struct Definition", func(t *testing.T) { + testutils.CompareStructs(t, reflect.TypeOf(Descriptor{}), reflect.TypeOf(expectedDescriptor), "Descriptor") + }) +} + +func TestDescriptorJson(t *testing.T) { + // Set up the temporary export directory + exportDir := filepath.Join(os.TempDir(), "descriptor_test") + outputFilePath := filepath.Join(exportDir, DESCRIPTOR_PATH) + + // Create a sample Descriptor instance + descriptor := Descriptor{ + FileFormat: "csv", + Delimiter: ",", + HasHeader: true, + ExportDir: exportDir, + QuoteChar: '"', + EscapeChar: '\\', + NullString: "NULL", + DataFileList: []*FileEntry{ + { + FilePath: "file.csv", // Use relative path for testing absolute path handling. + TableName: "public.my_table", + RowCount: 100, + FileSize: 2048, + }, + }, + TableNameToExportedColumns: map[string][]string{ + "public.my_table": {"id", "name", "age"}, + }, + } + + // Ensure the export directory exists + if err := os.MkdirAll(filepath.Join(exportDir, "metainfo"), 0755); err != nil { + t.Fatalf("Failed to create export directory: %v", err) + } + + // Clean up the export directory + defer func() { + if err := os.RemoveAll(exportDir); err != nil { + t.Fatalf("Failed to remove export directory: %v", err) + } + }() + + // Save the Descriptor to JSON + descriptor.Save() + + expectedJSON := `{ + "FileFormat": "csv", + "Delimiter": ",", + "HasHeader": true, + "QuoteChar": 34, + "EscapeChar": 92, + "NullString": "NULL", + "FileList": [ + { + "FilePath": "file.csv", + "TableName": "public.my_table", + "RowCount": 100, + "FileSize": 2048 + } + ], + "TableNameToExportedColumns": { + "public.my_table": [ + "id", + "name", + "age" + ] + } +}` + + // Compare the output JSON with the expected JSON + testutils.CompareJson(t, outputFilePath, expectedJSON, exportDir) +} diff --git a/yb-voyager/src/datastore/azDatastore.go 
b/yb-voyager/src/datastore/azDatastore.go index dbd06e7791..a2c1b432f5 100644 --- a/yb-voyager/src/datastore/azDatastore.go +++ b/yb-voyager/src/datastore/azDatastore.go @@ -35,7 +35,7 @@ type AzDataStore struct { func NewAzDataStore(dataDir string) *AzDataStore { url, err := url.Parse(dataDir) if err != nil { - utils.ErrExit("invalid azure resource URL %v", dataDir) + utils.ErrExit("invalid azure resource URL: %v", dataDir) } return &AzDataStore{url: url} } @@ -80,7 +80,7 @@ func (ds *AzDataStore) Open(objectPath string) (io.ReadCloser, error) { // if objectPath is hidden underneath a symlink for az blobs... objectPath, err := os.Readlink(objectPath) if err != nil { - utils.ErrExit("unable to resolve symlink %v to gcs resource: %w", objectPath, err) + utils.ErrExit("unable to resolve symlink: %v to gcs resource: %w", objectPath, err) } return az.NewObjectReader(objectPath) } diff --git a/yb-voyager/src/datastore/gcsDatastore.go b/yb-voyager/src/datastore/gcsDatastore.go index 312843e756..5e6bdbe7ef 100644 --- a/yb-voyager/src/datastore/gcsDatastore.go +++ b/yb-voyager/src/datastore/gcsDatastore.go @@ -17,12 +17,12 @@ limitations under the License. 
package datastore import ( + "fmt" "io" "net/url" "os" "regexp" "strings" - "fmt" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/gcs" @@ -36,7 +36,7 @@ type GCSDataStore struct { func NewGCSDataStore(resourceName string) *GCSDataStore { url, err := url.Parse(resourceName) if err != nil { - utils.ErrExit("invalid gcs resource URL %v", resourceName) + utils.ErrExit("invalid gcs resource URL: %v", resourceName) } return &GCSDataStore{url: url, bucketName: url.Host} } @@ -48,13 +48,13 @@ func (ds *GCSDataStore) Glob(pattern string) ([]string, error) { return nil, fmt.Errorf("listing all objects of %q: %w", pattern, err) } pattern = strings.Replace(pattern, "*", ".*", -1) - pattern = ds.url.String() + "/" + pattern + pattern = ds.url.String() + "/" + pattern re := regexp.MustCompile(pattern) var resultSet []string for _, objectName := range objectNames { objectName = ds.url.String() + "/" + objectName if re.MatchString(objectName) { - resultSet = append(resultSet, objectName) + resultSet = append(resultSet, objectName) } } return resultSet, nil @@ -68,7 +68,7 @@ func (ds *GCSDataStore) AbsolutePath(filePath string) (string, error) { func (ds *GCSDataStore) FileSize(filePath string) (int64, error) { objAttrs, err := gcs.GetObjAttrs(filePath) if err != nil { - return 0, fmt.Errorf("get attributes of %q: %w",filePath, err) + return 0, fmt.Errorf("get attributes of %q: %w", filePath, err) } return objAttrs.Size, nil } @@ -80,7 +80,7 @@ func (ds *GCSDataStore) Open(resourceName string) (io.ReadCloser, error) { // if resourceName is hidden underneath a symlink for gcs objects... 
objectPath, err := os.Readlink(resourceName) if err != nil { - utils.ErrExit("unable to resolve symlink %v to gcs resource: %w", resourceName, err) + utils.ErrExit("unable to resolve symlink: %v to gcs resource: %w", resourceName, err) } return gcs.NewObjectReader(objectPath) } diff --git a/yb-voyager/src/datastore/localDatastore.go b/yb-voyager/src/datastore/localDatastore.go index 723ce454d7..55ba4aad44 100644 --- a/yb-voyager/src/datastore/localDatastore.go +++ b/yb-voyager/src/datastore/localDatastore.go @@ -31,7 +31,7 @@ type LocalDataStore struct { func NewLocalDataStore(dataDir string) *LocalDataStore { dataDir, err := filepath.Abs(dataDir) if err != nil { - utils.ErrExit("failed to get absolute path of directory %q: %s", dataDir, err) + utils.ErrExit("failed to get absolute path of directory: %q: %s", dataDir, err) } dataDir = filepath.Clean(dataDir) return &LocalDataStore{dataDir: dataDir} diff --git a/yb-voyager/src/datastore/s3Datastore.go b/yb-voyager/src/datastore/s3Datastore.go index 60fb280539..909e3a34e7 100644 --- a/yb-voyager/src/datastore/s3Datastore.go +++ b/yb-voyager/src/datastore/s3Datastore.go @@ -17,10 +17,10 @@ limitations under the License. package datastore import ( + "fmt" "io" "net/url" "os" - "fmt" "regexp" "strings" @@ -36,7 +36,7 @@ type S3DataStore struct { func NewS3DataStore(resourceName string) *S3DataStore { url, err := url.Parse(resourceName) if err != nil { - utils.ErrExit("invalid s3 resource URL %v", resourceName) + utils.ErrExit("invalid s3 resource URL: %v", resourceName) } return &S3DataStore{url: url, bucketName: url.Host} } @@ -80,7 +80,7 @@ func (ds *S3DataStore) Open(resourceName string) (io.ReadCloser, error) { // if resourceName is hidden underneath a symlink for s3 objects... 
objectPath, err := os.Readlink(resourceName) if err != nil { - utils.ErrExit("unable to resolve symlink %v to s3 resource: %w", resourceName, err) + utils.ErrExit("unable to resolve symlink to s3 resource: %v: %w", resourceName, err) } return s3.NewObjectReader(objectPath) } diff --git a/yb-voyager/src/dbzm/config.go b/yb-voyager/src/dbzm/config.go index 9a12d484eb..b8b2909e6f 100644 --- a/yb-voyager/src/dbzm/config.go +++ b/yb-voyager/src/dbzm/config.go @@ -81,6 +81,7 @@ type Config struct { var baseConfigTemplate = ` debezium.format.value=connect debezium.format.key=connect +quarkus.http.port=%d quarkus.log.console.json=false quarkus.log.level=%s ` @@ -295,10 +296,19 @@ func (c *Config) String() string { } else { log.Infof("QUEUE_SEGMENT_MAX_BYTES: %d", queueSegmentMaxBytes) } + + quarkusLogPort, err := utils.GetFreePort() + if err != nil { + log.Warnf("failed to get a free port for quarkus http server, falling back to 8080: %v", err) + quarkusLogPort = 8080 + } + log.Infof("using port number %d for quarkus http server", quarkusLogPort) + var conf string switch c.SourceDBType { case "postgresql": conf = fmt.Sprintf(postgresConfigTemplate, + quarkusLogPort, c.LogLevel, c.Username, c.SnapshotMode, @@ -332,6 +342,7 @@ func (c *Config) String() string { case "yugabytedb": if !c.UseYBgRPCConnector { conf = fmt.Sprintf(yugabyteLogicalReplicationConfigTemplate, + quarkusLogPort, c.LogLevel, c.Username, "never", @@ -359,6 +370,7 @@ func (c *Config) String() string { } } else { conf = fmt.Sprintf(yugabyteConfigTemplate, + quarkusLogPort, c.LogLevel, c.Username, "never", @@ -391,6 +403,7 @@ func (c *Config) String() string { } case "oracle": conf = fmt.Sprintf(oracleConfigTemplate, + quarkusLogPort, c.LogLevel, c.Username, c.SnapshotMode, @@ -422,6 +435,7 @@ func (c *Config) String() string { case "mysql": conf = fmt.Sprintf(mysqlConfigTemplate, + quarkusLogPort, c.LogLevel, c.Username, c.SnapshotMode, diff --git a/yb-voyager/src/dbzm/status.go 
b/yb-voyager/src/dbzm/status.go index d176272498..5e0c0d2838 100644 --- a/yb-voyager/src/dbzm/status.go +++ b/yb-voyager/src/dbzm/status.go @@ -82,7 +82,7 @@ func IsMigrationInStreamingMode(exportDir string) bool { statusFilePath := filepath.Join(exportDir, "data", "export_status.json") status, err := ReadExportStatus(statusFilePath) if err != nil { - utils.ErrExit("Failed to read export status file %s: %v", statusFilePath, err) + utils.ErrExit("Failed to read export status file: %s: %v", statusFilePath, err) } return status != nil && status.Mode == MODE_STREAMING } diff --git a/yb-voyager/src/dbzm/status_test.go b/yb-voyager/src/dbzm/status_test.go new file mode 100644 index 0000000000..cfd1f4deaf --- /dev/null +++ b/yb-voyager/src/dbzm/status_test.go @@ -0,0 +1,67 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package dbzm + +import ( + "reflect" + "testing" + + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +func TestExportStatusStructs(t *testing.T) { + test := []struct { + name string + actualType reflect.Type + expectedType interface{} + }{ + { + name: "Validate TableExportStatus Struct Definition", + actualType: reflect.TypeOf(TableExportStatus{}), + expectedType: struct { + Sno int `json:"sno"` + DatabaseName string `json:"database_name"` + SchemaName string `json:"schema_name"` + TableName string `json:"table_name"` + FileName string `json:"file_name"` + ExportedRowCountSnapshot int64 `json:"exported_row_count_snapshot"` + }{}, + }, + { + name: "Validate ExportStatus Struct Definition", + actualType: reflect.TypeOf(ExportStatus{}), + expectedType: struct { + Mode string `json:"mode"` + Tables []TableExportStatus `json:"tables"` + Sequences map[string]int64 `json:"sequences"` + }{}, + }, + } + + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + testutils.CompareStructs(t, tt.actualType, reflect.TypeOf(tt.expectedType), tt.name) + }) + } +} + +// TODO: Implement this test +// The export status json file is created by debezium and currently we dont have infrastructure to test it. +// To test this we need to create a json file (using dbzm code) and read it back (here) and compare the values. +// func TestReadExportStatus(t *testing.T) { +//} diff --git a/yb-voyager/src/issue/constants.go b/yb-voyager/src/issue/constants.go deleted file mode 100644 index fdd94c0ea2..0000000000 --- a/yb-voyager/src/issue/constants.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright (c) YugabyteDB, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package issue - -// Types -const ( - ADVISORY_LOCKS = "ADVISORY_LOCKS" - SYSTEM_COLUMNS = "SYSTEM_COLUMNS" - XML_FUNCTIONS = "XML_FUNCTIONS" -) - -// Object types -const ( - TABLE_OBJECT_TYPE = "TABLE" - FUNCTION_OBJECT_TYPE = "FUNCTION" - DML_QUERY_OBJECT_TYPE = "DML_QUERY" -) diff --git a/yb-voyager/src/issue/dml.go b/yb-voyager/src/issue/dml.go deleted file mode 100644 index 1479ff9370..0000000000 --- a/yb-voyager/src/issue/dml.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright (c) YugabyteDB, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package issue - -var advisoryLocksIssue = Issue{ - Type: ADVISORY_LOCKS, - TypeName: "Advisory Locks", - TypeDescription: "", - Suggestion: "", - GH: "", - DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", -} - -func NewAdvisoryLocksIssue(objectType string, objectName string, sqlStatement string) IssueInstance { - return newIssueInstance(advisoryLocksIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) -} - -var systemColumnsIssue = Issue{ - Type: SYSTEM_COLUMNS, - TypeName: "System Columns", - TypeDescription: "", - Suggestion: "", - GH: "", - DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#system-columns-is-not-yet-supported", -} - -func NewSystemColumnsIssue(objectType string, objectName string, sqlStatement string) IssueInstance { - return newIssueInstance(systemColumnsIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) -} - -var xmlFunctionsIssue = Issue{ - Type: XML_FUNCTIONS, - TypeName: "XML Functions", - TypeDescription: "", - Suggestion: "", - GH: "", - DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", -} - -func NewXmlFunctionsIssue(objectType string, objectName string, sqlStatement string) IssueInstance { - return newIssueInstance(xmlFunctionsIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) -} diff --git a/yb-voyager/src/issue/issue.go b/yb-voyager/src/issue/issue.go index 2e43ebbada..47bf663fbf 100644 --- a/yb-voyager/src/issue/issue.go +++ b/yb-voyager/src/issue/issue.go @@ -16,29 +16,61 @@ limitations under the License. 
package issue +import ( + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" +) + type Issue struct { - Type string // (advisory_locks, index_not_supported, etc) - TypeName string // for display - TypeDescription string - Suggestion string - GH string - DocsLink string + Type string // (advisory_locks, index_not_supported, etc) + Name string // for display + Description string + Impact string + Suggestion string + GH string + DocsLink string + MinimumVersionsFixedIn map[string]*ybversion.YBVersion // key: series (2024.1, 2.21, etc) } -type IssueInstance struct { - Issue - ObjectType string // TABLE, FUNCTION, DML_QUERY? - ObjectName string // table name/function name/etc - SqlStatement string - Details map[string]interface{} // additional details about the issue +func (i Issue) IsFixedIn(v *ybversion.YBVersion) (bool, error) { + if i.MinimumVersionsFixedIn == nil { + return false, nil + } + minVersionFixedInSeries, ok := i.MinimumVersionsFixedIn[v.Series()] + if !ok { + return false, nil + } + return v.GreaterThanOrEqual(minVersionFixedInSeries), nil } -func newIssueInstance(issue Issue, objectType string, objectName string, sqlStatement string, details map[string]interface{}) IssueInstance { - return IssueInstance{ - Issue: issue, - ObjectType: objectType, - ObjectName: objectName, - SqlStatement: sqlStatement, - Details: details, +/* + Dynamic Impact Determination (TODO) + - We can define the impact calculator function based on issue type wherever/whenever needed + - Map will have functions only for issue type with dynamic impact determination + + For example: + + type ImpactCalcFunc func(issue QueryIssue, stats *PgStats) string + + var impactCalculators = map[string]ImpactCalcFunc{ + INHERITED_TABLE: inheritedTableImpactCalc, + // etc... } -} + + // Example dynamic function + func inheritedTableImpactCalc(i QueryIssue, stats *PgStats) string { + usage := stats.GetUsage(i.ObjectName) // e.g. 
how many reads/writes + if usage.WritesPerDay > 1000 { + return "LEVEL_2" + } + return "LEVEL_3" + } + + func (i Issue) GetImpact(stats *PgStats) string { + if calc, ok := impactCalculators[i.Type]; ok { + return calc(i, stats) + } + + return lo.Ternary(i.Impact != "", i.Impact, constants.IMPACT_LEVEL_1) + } + +*/ diff --git a/yb-voyager/src/issue/issue_test.go b/yb-voyager/src/issue/issue_test.go new file mode 100644 index 0000000000..4dd58a5649 --- /dev/null +++ b/yb-voyager/src/issue/issue_test.go @@ -0,0 +1,125 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package issue + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" +) + +func TestIssueFixedInStable(t *testing.T) { + fixedVersion, err := ybversion.NewYBVersion("2024.1.1.0") + assert.NoError(t, err) + issue := Issue{ + Type: "ADVISORY_LOCKS", + MinimumVersionsFixedIn: map[string]*ybversion.YBVersion{ + ybversion.SERIES_2024_1: fixedVersion, + }, + } + + versionsToCheck := map[string]bool{ + "2024.1.1.0": true, + "2024.1.1.1": true, + "2024.1.0.0": false, + } + for v, expected := range versionsToCheck { + ybVersion, err := ybversion.NewYBVersion(v) + assert.NoError(t, err) + + fixed, err := issue.IsFixedIn(ybVersion) + assert.NoError(t, err) + assert.Equalf(t, expected, fixed, "comparing ybv %s to fixed %s", ybVersion, fixedVersion) + } +} + +func TestIssueFixedInPreview(t *testing.T) { + fixedVersion, err := ybversion.NewYBVersion("2.21.4.5") + assert.NoError(t, err) + issue := Issue{ + Type: "ADVISORY_LOCKS", + MinimumVersionsFixedIn: map[string]*ybversion.YBVersion{ + ybversion.SERIES_2_21: fixedVersion, + }, + } + + versionsToCheck := map[string]bool{ + "2.21.4.5": true, + "2.21.5.5": true, + "2.21.4.1": false, + } + for v, expected := range versionsToCheck { + ybVersion, err := ybversion.NewYBVersion(v) + assert.NoError(t, err) + + fixed, err := issue.IsFixedIn(ybVersion) + assert.NoError(t, err) + assert.Equalf(t, expected, fixed, "comparing ybv %s to fixed %s", ybVersion, fixedVersion) + } +} + +func TestIssueFixedInStableOld(t *testing.T) { + fixedVersionStableOld, err := ybversion.NewYBVersion("2.20.7.1") + assert.NoError(t, err) + fixedVersionStable, err := ybversion.NewYBVersion("2024.1.1.1") + assert.NoError(t, err) + + issue := Issue{ + Type: "ADVISORY_LOCKS", + MinimumVersionsFixedIn: map[string]*ybversion.YBVersion{ + ybversion.SERIES_2024_1: fixedVersionStable, + ybversion.SERIES_2_20: fixedVersionStableOld, + }, + } + + versionsToCheck := map[string]bool{ + "2.20.0.0": 
false, + "2.20.7.0": false, + "2.20.7.1": true, + "2024.1.1.1": true, + "2024.1.1.2": true, + "2024.1.1.0": false, + } + for v, expected := range versionsToCheck { + ybVersion, err := ybversion.NewYBVersion(v) + assert.NoError(t, err) + + fixed, err := issue.IsFixedIn(ybVersion) + assert.NoError(t, err) + assert.Equalf(t, expected, fixed, "comparing ybv %s to fixed [%s, %s]", ybVersion, fixedVersionStableOld, fixedVersionStable) + } +} + +func TestIssueFixedFalseWhenMinimumNotSpecified(t *testing.T) { + issue := Issue{ + Type: "ADVISORY_LOCKS", + } + + versionsToCheck := []string{"2024.1.0.0", "2.20.7.4", "2.21.1.1"} + + for _, v := range versionsToCheck { + ybVersion, err := ybversion.NewYBVersion(v) + assert.NoError(t, err) + + fixed, err := issue.IsFixedIn(ybVersion) + assert.NoError(t, err) + // If the minimum fixed version is not specified, the issue is not fixed in any version. + assert.Falsef(t, fixed, "comparing ybv %s to fixed should be false", ybVersion) + } +} diff --git a/yb-voyager/src/metadb/metadataDB_test.go b/yb-voyager/src/metadb/metadataDB_test.go new file mode 100644 index 0000000000..bee70480ea --- /dev/null +++ b/yb-voyager/src/metadb/metadataDB_test.go @@ -0,0 +1,118 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package metadb + +import ( + "database/sql" + "fmt" + "os" + "testing" + + _ "github.com/mattn/go-sqlite3" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +// Test the initMetaDB function +func TestInitMetaDB(t *testing.T) { + // Define the expected columns and their types for each table + expectedTables := map[string]map[string]testutils.ColumnPropertiesSqlite{ + QUEUE_SEGMENT_META_TABLE_NAME: { + "segment_no": {Type: "INTEGER", PrimaryKey: 1}, + "file_path": {Type: "TEXT"}, + "size_committed": {Type: "INTEGER"}, + "total_events": {Type: "INTEGER"}, + "exporter_role": {Type: "TEXT"}, + "imported_by_target_db_importer": {Type: "INTEGER", Default: sql.NullString{String: "0", Valid: true}}, + "imported_by_source_replica_db_importer": {Type: "INTEGER", Default: sql.NullString{String: "0", Valid: true}}, + "imported_by_source_db_importer": {Type: "INTEGER", Default: sql.NullString{String: "0", Valid: true}}, + "archived": {Type: "INTEGER", Default: sql.NullString{String: "0", Valid: true}}, + "deleted": {Type: "INTEGER", Default: sql.NullString{String: "0", Valid: true}}, + "archive_location": {Type: "TEXT"}, + }, + EXPORTED_EVENTS_STATS_TABLE_NAME: { + // TODO: We have a composite primary key here (run_id, exporter_role, timestamp) + "run_id": {Type: "TEXT", PrimaryKey: 1}, + "exporter_role": {Type: "TEXT", PrimaryKey: 2}, + "timestamp": {Type: "INTEGER", PrimaryKey: 3}, + "num_total": {Type: "INTEGER"}, + "num_inserts": {Type: "INTEGER"}, + "num_updates": {Type: "INTEGER"}, + "num_deletes": {Type: "INTEGER"}, + }, + EXPORTED_EVENTS_STATS_PER_TABLE_TABLE_NAME: { + "exporter_role": {Type: "TEXT", PrimaryKey: 1}, + "schema_name": {Type: "TEXT", PrimaryKey: 2}, + "table_name": {Type: "TEXT", PrimaryKey: 3}, + "num_total": {Type: "INTEGER"}, + "num_inserts": {Type: "INTEGER"}, + "num_updates": {Type: "INTEGER"}, + "num_deletes": {Type: "INTEGER"}, + }, + JSON_OBJECTS_TABLE_NAME: { + "key": {Type: "TEXT", PrimaryKey: 1}, + "json_text": {Type: 
"TEXT"}, + }, + } + + // Create a temporary SQLite database file for testing + tempFile, err := os.CreateTemp(os.TempDir(), "test_meta_db_*.db") + if err != nil { + t.Fatalf("Failed to create temporary file: %v", err) + } + + // remove the temporary file + defer func() { + err := os.Remove(tempFile.Name()) + if err != nil { + t.Fatalf("Failed to remove temporary file: %v", err) + } + }() + + // Call initMetaDB with the path to the temporary file + err = initMetaDB(tempFile.Name()) // Pass the temp file path to initMetaDB + if err != nil { + t.Fatalf("Failed to initialize database: %v", err) + } else { + t.Logf("Database initialized successfully") + } + + // Open the temporary database for verification + db, err := sql.Open("sqlite3", tempFile.Name()) + if err != nil { + t.Fatalf("Failed to open temporary database: %v", err) + } + defer db.Close() + + // Verify the existence of each table and no extra tables + t.Run("Check table existence and no extra tables", func(t *testing.T) { + err := testutils.CheckTableExistenceSqlite(t, db, expectedTables) + if err != nil { + t.Errorf("Table existence mismatch: %v", err) + } + }) + + // Verify the structure of each table + for table, expectedColumns := range expectedTables { + t.Run(fmt.Sprintf("Check structure of %s table", table), func(t *testing.T) { + err := testutils.CheckTableStructureSqlite(db, table, expectedColumns) + if err != nil { + t.Errorf("Table %s structure mismatch: %v", table, err) + } + }) + } +} diff --git a/yb-voyager/src/migassessment/assessmentDB.go b/yb-voyager/src/migassessment/assessmentDB.go index c4700ccb63..687919122a 100644 --- a/yb-voyager/src/migassessment/assessmentDB.go +++ b/yb-voyager/src/migassessment/assessmentDB.go @@ -59,7 +59,7 @@ type TableIndexStats struct { SizeInBytes *int64 `json:"SizeInBytes"` } -func GetSourceMetadataDBFilePath() string { +var GetSourceMetadataDBFilePath = func() string { return filepath.Join(AssessmentDir, "dbs", "assessment.db") } diff --git 
a/yb-voyager/src/migassessment/assessmentDB_test.go b/yb-voyager/src/migassessment/assessmentDB_test.go new file mode 100644 index 0000000000..204acbb128 --- /dev/null +++ b/yb-voyager/src/migassessment/assessmentDB_test.go @@ -0,0 +1,148 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package migassessment + +import ( + "database/sql" + "fmt" + "os" + "testing" + + _ "github.com/mattn/go-sqlite3" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +func TestInitAssessmentDB(t *testing.T) { + expectedTables := map[string]map[string]testutils.ColumnPropertiesSqlite{ + TABLE_INDEX_IOPS: { + "schema_name": {Type: "TEXT", PrimaryKey: 1}, + "object_name": {Type: "TEXT", PrimaryKey: 2}, + "object_type": {Type: "TEXT"}, + "seq_reads": {Type: "INTEGER"}, + "row_writes": {Type: "INTEGER"}, + "measurement_type": {Type: "TEXT", PrimaryKey: 3}, + }, + TABLE_INDEX_SIZES: { + "schema_name": {Type: "TEXT", PrimaryKey: 1}, + "object_name": {Type: "TEXT", PrimaryKey: 2}, + "object_type": {Type: "TEXT"}, + "size_in_bytes": {Type: "INTEGER"}, + }, + TABLE_ROW_COUNTS: { + "schema_name": {Type: "TEXT", PrimaryKey: 1}, + "table_name": {Type: "TEXT", PrimaryKey: 2}, + "row_count": {Type: "INTEGER"}, + }, + TABLE_COLUMNS_COUNT: { + "schema_name": {Type: "TEXT", PrimaryKey: 1}, + "object_name": {Type: "TEXT", PrimaryKey: 2}, + "object_type": {Type: "TEXT"}, + "column_count": {Type: "INTEGER"}, + }, + INDEX_TO_TABLE_MAPPING: { + 
"index_schema": {Type: "TEXT", PrimaryKey: 1}, + "index_name": {Type: "TEXT", PrimaryKey: 2}, + "table_schema": {Type: "TEXT"}, + "table_name": {Type: "TEXT"}, + }, + OBJECT_TYPE_MAPPING: { + "schema_name": {Type: "TEXT", PrimaryKey: 1}, + "object_name": {Type: "TEXT", PrimaryKey: 2}, + "object_type": {Type: "TEXT"}, + }, + TABLE_COLUMNS_DATA_TYPES: { + "schema_name": {Type: "TEXT", PrimaryKey: 1}, + "table_name": {Type: "TEXT", PrimaryKey: 2}, + "column_name": {Type: "TEXT", PrimaryKey: 3}, + "data_type": {Type: "TEXT"}, + }, + TABLE_INDEX_STATS: { + "schema_name": {Type: "TEXT", PrimaryKey: 1}, + "object_name": {Type: "TEXT", PrimaryKey: 2}, + "row_count": {Type: "INTEGER"}, + "column_count": {Type: "INTEGER"}, + "reads": {Type: "INTEGER"}, + "writes": {Type: "INTEGER"}, + "reads_per_second": {Type: "INTEGER"}, + "writes_per_second": {Type: "INTEGER"}, + "is_index": {Type: "BOOLEAN"}, + "object_type": {Type: "TEXT"}, + "parent_table_name": {Type: "TEXT"}, + "size_in_bytes": {Type: "INTEGER"}, + }, + DB_QUERIES_SUMMARY: { + "queryid": {Type: "BIGINT"}, + "query": {Type: "TEXT"}, + }, + } + + // Create a temporary SQLite database file for testing + tempFile, err := os.CreateTemp(os.TempDir(), "test_assessment_db_*.db") + if err != nil { + t.Fatalf("Failed to create temporary file: %v", err) + } + // Ensure the file is removed after the test + defer func() { + err := os.Remove(tempFile.Name()) + if err != nil { + t.Fatalf("Failed to remove temporary file: %v", err) + } + }() + + GetSourceMetadataDBFilePath = func() string { + return tempFile.Name() + } + + err = InitAssessmentDB() + if err != nil { + t.Fatalf("Failed to initialize database: %v", err) + } else { + t.Logf("Database initialized successfully") + } + + // Open the temporary database for verification + db, err := sql.Open("sqlite3", tempFile.Name()) + if err != nil { + t.Fatalf("Failed to open temporary database: %v", err) + } + defer db.Close() + + // Verify the existence of each table and no extra 
tables + t.Run("Check table existence and no extra tables", func(t *testing.T) { + err := testutils.CheckTableExistenceSqlite(t, db, expectedTables) + if err != nil { + t.Errorf("Table existence mismatch: %v", err) + } + }) + + // Verify the structure of each table + for table, expectedColumns := range expectedTables { + t.Run(fmt.Sprintf("Check structure of %s table", table), func(t *testing.T) { + err := testutils.CheckTableStructureSqlite(db, table, expectedColumns) + if err != nil { + t.Errorf("Table %s structure mismatch: %v", table, err) + } + }) + } + +} + +// Helper function to create a string pointer +func stringPointer(s string) *string { + return &s +} diff --git a/yb-voyager/src/migassessment/sizing.go b/yb-voyager/src/migassessment/sizing.go index 1d45a1167b..05383341a1 100644 --- a/yb-voyager/src/migassessment/sizing.go +++ b/yb-voyager/src/migassessment/sizing.go @@ -25,6 +25,7 @@ import ( "net/http" "os" "path/filepath" + "strings" "github.com/samber/lo" @@ -139,6 +140,12 @@ func getExperimentDBPath() string { //go:embed resources/yb_2024_0_source.db var experimentData20240 []byte +var SourceMetadataObjectTypesToUse = []string{ + "%table%", + "%index%", + "materialized view", +} + func SizingAssessment() error { log.Infof("loading metadata files for sharding assessment") @@ -1228,6 +1235,14 @@ Returns: float64: The total size of the source database in gigabytes. 
*/ func getSourceMetadata(sourceDB *sql.DB) ([]SourceDBMetadata, []SourceDBMetadata, float64, error) { + // Construct the WHERE clause dynamically using LIKE + var likeConditions []string + for _, pattern := range SourceMetadataObjectTypesToUse { + likeConditions = append(likeConditions, fmt.Sprintf("object_type LIKE '%s'", pattern)) + } + // Join the LIKE conditions with OR + whereClause := strings.Join(likeConditions, " OR ") + query := fmt.Sprintf(` SELECT schema_name, object_name, @@ -1239,8 +1254,9 @@ func getSourceMetadata(sourceDB *sql.DB) ([]SourceDBMetadata, []SourceDBMetadata size_in_bytes, column_count FROM %v + WHERE %s ORDER BY IFNULL(size_in_bytes, 0) ASC - `, GetTableIndexStatName()) + `, GetTableIndexStatName(), whereClause) rows, err := sourceDB.Query(query) if err != nil { return nil, nil, 0.0, fmt.Errorf("failed to query source metadata with query [%s]: %w", query, err) diff --git a/yb-voyager/src/migassessment/sizing_test.go b/yb-voyager/src/migassessment/sizing_test.go index bdf28050fc..f65bfeb5fb 100644 --- a/yb-voyager/src/migassessment/sizing_test.go +++ b/yb-voyager/src/migassessment/sizing_test.go @@ -1,3 +1,5 @@ +//go:build unit + /* Copyright (c) YugabyteDB, Inc. 
@@ -27,7 +29,7 @@ import ( "github.com/stretchr/testify/assert" ) -var AssessmentDbSelectQuery = fmt.Sprintf("(?i)SELECT schema_name,.* FROM %v ORDER BY .* ASC", TABLE_INDEX_STATS) +var AssessmentDbSelectQuery = fmt.Sprintf("(?i)SELECT schema_name,.* FROM %v .* ORDER BY .* ASC", TABLE_INDEX_STATS) var AssessmentDBColumns = []string{"schema_name", "object_name", "row_count", "reads_per_second", "writes_per_second", "is_index", "parent_table_name", "size_in_bytes", "column_count"} diff --git a/yb-voyager/src/namereg/namereg.go b/yb-voyager/src/namereg/namereg.go index 46f35ce464..f1b7890d3e 100644 --- a/yb-voyager/src/namereg/namereg.go +++ b/yb-voyager/src/namereg/namereg.go @@ -8,23 +8,18 @@ import ( "github.com/samber/lo" log "github.com/sirupsen/logrus" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/jsonfile" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" ) -const ( - YUGABYTEDB = sqlname.YUGABYTEDB - POSTGRESQL = sqlname.POSTGRESQL - ORACLE = sqlname.ORACLE - MYSQL = sqlname.MYSQL -) - const ( TARGET_DB_IMPORTER_ROLE = "target_db_importer" SOURCE_DB_IMPORTER_ROLE = "source_db_importer" // Fallback. SOURCE_REPLICA_DB_IMPORTER_ROLE = "source_replica_db_importer" // Fall-forward. 
SOURCE_DB_EXPORTER_ROLE = "source_db_exporter" + SOURCE_DB_EXPORTER_STATUS_ROLE = "source_db_exporter_status" TARGET_DB_EXPORTER_FF_ROLE = "target_db_exporter_ff" TARGET_DB_EXPORTER_FB_ROLE = "target_db_exporter_fb" IMPORT_FILE_ROLE = "import_file" @@ -129,7 +124,7 @@ func (reg *NameRegistry) registerNames() (bool, error) { return reg.registerYBNames() case reg.params.Role == SOURCE_REPLICA_DB_IMPORTER_ROLE && reg.DefaultSourceReplicaDBSchemaName == "": log.Infof("setting default source replica schema name in the name registry: %s", reg.DefaultSourceDBSchemaName) - defaultSchema := lo.Ternary(reg.SourceDBType == POSTGRESQL, reg.DefaultSourceDBSchemaName, reg.params.TargetDBSchema) + defaultSchema := lo.Ternary(reg.SourceDBType == constants.POSTGRESQL, reg.DefaultSourceDBSchemaName, reg.params.TargetDBSchema) reg.setDefaultSourceReplicaDBSchemaName(defaultSchema) return true, nil } @@ -147,6 +142,9 @@ func (reg *NameRegistry) UnRegisterYBNames() error { } func (reg *NameRegistry) registerSourceNames() (bool, error) { + if reg.params.SDB == nil { + return false, fmt.Errorf("source db connection is not available") + } reg.SourceDBType = reg.params.SourceDBType reg.initSourceDBSchemaNames() m := make(map[string][]string) @@ -158,7 +156,7 @@ func (reg *NameRegistry) registerSourceNames() (bool, error) { m[schemaName] = tableNames seqNames, err := reg.params.SDB.GetAllSequencesRaw(schemaName) if err != nil { - return false, fmt.Errorf("get all table names: %w", err) + return false, fmt.Errorf("get all sequence names: %w", err) } m[schemaName] = append(m[schemaName], seqNames...) } @@ -170,11 +168,11 @@ func (reg *NameRegistry) initSourceDBSchemaNames() { // source.Schema contains only one schema name for MySQL and Oracle; whereas // it contains a pipe separated list for postgres. 
switch reg.params.SourceDBType { - case ORACLE: + case constants.ORACLE: reg.SourceDBSchemaNames = []string{strings.ToUpper(reg.params.SourceDBSchema)} - case MYSQL: + case constants.MYSQL: reg.SourceDBSchemaNames = []string{reg.params.SourceDBName} - case POSTGRESQL: + case constants.POSTGRESQL: reg.SourceDBSchemaNames = lo.Map(strings.Split(reg.params.SourceDBSchema, "|"), func(s string, _ int) string { return strings.ToLower(s) }) @@ -194,11 +192,11 @@ func (reg *NameRegistry) registerYBNames() (bool, error) { m := make(map[string][]string) reg.DefaultYBSchemaName = reg.params.TargetDBSchema - if reg.SourceDBTableNames != nil && reg.SourceDBType == POSTGRESQL { + if reg.SourceDBTableNames != nil && reg.SourceDBType == constants.POSTGRESQL { reg.DefaultYBSchemaName = reg.DefaultSourceDBSchemaName } switch reg.SourceDBType { - case POSTGRESQL: + case constants.POSTGRESQL: reg.YBSchemaNames = reg.SourceDBSchemaNames default: reg.YBSchemaNames = []string{reg.params.TargetDBSchema} @@ -211,7 +209,7 @@ func (reg *NameRegistry) registerYBNames() (bool, error) { m[schemaName] = tableNames seqNames, err := yb.GetAllSequencesRaw(schemaName) if err != nil { - return false, fmt.Errorf("get all table names: %w", err) + return false, fmt.Errorf("get all sequence names: %w", err) } m[schemaName] = append(m[schemaName], seqNames...) } @@ -290,9 +288,9 @@ func (reg *NameRegistry) LookupTableName(tableNameArg string) (sqlname.NameTuple if errors.As(err, &errObj) { // Case insensitive match. 
caseInsensitiveName := tableName - if reg.SourceDBType == POSTGRESQL || reg.SourceDBType == YUGABYTEDB { + if reg.SourceDBType == constants.POSTGRESQL || reg.SourceDBType == constants.YUGABYTEDB { caseInsensitiveName = strings.ToLower(tableName) - } else if reg.SourceDBType == ORACLE { + } else if reg.SourceDBType == constants.ORACLE { caseInsensitiveName = strings.ToUpper(tableName) } if lo.Contains(errObj.Names, caseInsensitiveName) { @@ -308,14 +306,14 @@ func (reg *NameRegistry) LookupTableName(tableNameArg string) (sqlname.NameTuple } if reg.YBTableNames != nil { // nil in `export` mode. targetName, err = reg.lookup( - YUGABYTEDB, reg.YBTableNames, reg.DefaultYBSchemaName, schemaName, tableName) + constants.YUGABYTEDB, reg.YBTableNames, reg.DefaultYBSchemaName, schemaName, tableName) if err != nil { errObj := &ErrMultipleMatchingNames{} if errors.As(err, &errObj) { // A special case. if lo.Contains(errObj.Names, strings.ToLower(tableName)) { targetName, err = reg.lookup( - YUGABYTEDB, reg.YBTableNames, reg.DefaultYBSchemaName, schemaName, strings.ToLower(tableName)) + constants.YUGABYTEDB, reg.YBTableNames, reg.DefaultYBSchemaName, schemaName, strings.ToLower(tableName)) } } if err != nil { @@ -403,7 +401,7 @@ func NewNameTuple(role string, sourceName *sqlname.ObjectName, targetName *sqlna t.CurrentName = t.SourceName case SOURCE_REPLICA_DB_IMPORTER_ROLE: t.CurrentName = t.SourceName - case SOURCE_DB_EXPORTER_ROLE: + case SOURCE_DB_EXPORTER_ROLE, SOURCE_DB_EXPORTER_STATUS_ROLE: t.CurrentName = t.SourceName case TARGET_DB_EXPORTER_FF_ROLE, TARGET_DB_EXPORTER_FB_ROLE: t.CurrentName = t.TargetName diff --git a/yb-voyager/src/namereg/namereg_test.go b/yb-voyager/src/namereg/namereg_test.go index 279ae9a919..f4c1430daf 100644 --- a/yb-voyager/src/namereg/namereg_test.go +++ b/yb-voyager/src/namereg/namereg_test.go @@ -1,19 +1,41 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ package namereg import ( "fmt" "os" + "path/filepath" + "reflect" + "strings" "testing" "github.com/samber/lo" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" ) var oracleToYBNameRegistry = &NameRegistry{ - SourceDBType: ORACLE, + SourceDBType: constants.ORACLE, params: NameRegistryParams{ Role: TARGET_DB_IMPORTER_ROLE, }, @@ -37,15 +59,15 @@ func buildNameTuple(reg *NameRegistry, sourceSchema, sourceTable, targetSchema, sourceName = sqlname.NewObjectName(reg.SourceDBType, sourceSchema, sourceSchema, sourceTable) } if targetSchema != "" && targetTable != "" { - targetName = sqlname.NewObjectName(YUGABYTEDB, targetSchema, targetSchema, targetTable) + targetName = sqlname.NewObjectName(constants.YUGABYTEDB, targetSchema, targetSchema, targetTable) } return NewNameTuple(reg.params.Role, sourceName, targetName) } func TestNameTuple(t *testing.T) { assert := assert.New(t) - sourceName := sqlname.NewObjectName(ORACLE, "SAKILA", "SAKILA", "TABLE1") - targetName := sqlname.NewObjectName(YUGABYTEDB, "public", "public", "table1") + sourceName := sqlname.NewObjectName(constants.ORACLE, "SAKILA", "SAKILA", "TABLE1") + targetName := sqlname.NewObjectName(constants.YUGABYTEDB, "public", "public", "table1") ntup := 
NewNameTuple(TARGET_DB_IMPORTER_ROLE, sourceName, targetName) @@ -72,8 +94,8 @@ func TestNameTuple(t *testing.T) { func TestNameTupleMatchesPattern(t *testing.T) { assert := assert.New(t) - sourceName := sqlname.NewObjectName(ORACLE, "SAKILA", "SAKILA", "TABLE1") - targetName := sqlname.NewObjectName(YUGABYTEDB, "public", "sakila", "table1") + sourceName := sqlname.NewObjectName(constants.ORACLE, "SAKILA", "SAKILA", "TABLE1") + targetName := sqlname.NewObjectName(constants.YUGABYTEDB, "public", "sakila", "table1") ntup := NewNameTuple(TARGET_DB_IMPORTER_ROLE, sourceName, targetName) testCases := []struct { @@ -108,8 +130,8 @@ func TestNameTupleMatchesPattern(t *testing.T) { func TestNameTupleMatchesPatternMySQL(t *testing.T) { assert := assert.New(t) - sourceName := sqlname.NewObjectName(MYSQL, "test", "test", "Table1") - targetName := sqlname.NewObjectName(YUGABYTEDB, "public", "test", "table1") + sourceName := sqlname.NewObjectName(constants.MYSQL, "test", "test", "Table1") + targetName := sqlname.NewObjectName(constants.YUGABYTEDB, "public", "test", "table1") ntup := NewNameTuple(TARGET_DB_IMPORTER_ROLE, sourceName, targetName) testCases := []struct { pattern string @@ -146,7 +168,7 @@ func TestNameMatchesPattern(t *testing.T) { require := require.New(t) reg := &NameRegistry{ - SourceDBType: ORACLE, + SourceDBType: constants.ORACLE, params: NameRegistryParams{ Role: SOURCE_DB_EXPORTER_ROLE, }, @@ -377,7 +399,7 @@ func TestDifferentSchemaInSameDBAsSourceReplica2(t *testing.T) { //===================================================== type dummySourceDB struct { - tableNames map[string][]string // schemaName -> tableNames + tableNames map[string][]string // schemaName -> tableNames sequenceNames map[string][]string // schemaName -> sequenceNames } @@ -398,8 +420,8 @@ func (db *dummySourceDB) GetAllSequencesRaw(schemaName string) ([]string, error) } type dummyTargetDB struct { - tableNames map[string][]string // schemaName -> tableNames - sequenceNames 
map[string][]string // schemaName -> sequenceNames + tableNames map[string][]string // schemaName -> tableNames + sequenceNames map[string][]string // schemaName -> sequenceNames } func (db *dummyTargetDB) GetAllSchemaNamesRaw() ([]string, error) { @@ -414,7 +436,6 @@ func (db *dummyTargetDB) GetAllTableNamesRaw(schemaName string) ([]string, error return tableNames, nil } - func (db *dummyTargetDB) GetAllSequencesRaw(schemaName string) ([]string, error) { sequenceNames, ok := db.sequenceNames[schemaName] if !ok { @@ -448,18 +469,18 @@ func TestNameRegistryWithDummyDBs(t *testing.T) { } sourceNamesMap := make(map[string][]string) - for k,v := range dummySdb.tableNames { + for k, v := range dummySdb.tableNames { sourceNamesMap[k] = append(sourceNamesMap[k], v...) } - for k,v := range dummySdb.sequenceNames { + for k, v := range dummySdb.sequenceNames { sourceNamesMap[k] = append(sourceNamesMap[k], v...) } targetNamesMap := make(map[string][]string) - for k,v := range dummyTdb.tableNames { + for k, v := range dummyTdb.tableNames { targetNamesMap[k] = append(targetNamesMap[k], v...) } - for k,v := range dummyTdb.sequenceNames { + for k, v := range dummyTdb.sequenceNames { targetNamesMap[k] = append(targetNamesMap[k], v...) 
} @@ -469,7 +490,7 @@ func TestNameRegistryWithDummyDBs(t *testing.T) { params := NameRegistryParams{ FilePath: "", Role: currentMode, - SourceDBType: ORACLE, + SourceDBType: constants.ORACLE, SourceDBSchema: "SAKILA", SourceDBName: "ORCLPDB1", TargetDBSchema: tSchema, @@ -487,7 +508,7 @@ func TestNameRegistryWithDummyDBs(t *testing.T) { err := reg.Init() require.Nil(err) - assert.Equal(ORACLE, reg.SourceDBType) + assert.Equal(constants.ORACLE, reg.SourceDBType) assert.Equal("SAKILA", reg.DefaultSourceDBSchemaName) assert.Equal(sourceNamesMap, reg.SourceDBTableNames) table1 := buildNameTuple(reg, "SAKILA", "TABLE1", "", "") @@ -497,13 +518,13 @@ func TestNameRegistryWithDummyDBs(t *testing.T) { seq1 := buildNameTuple(reg, "SAKILA", "SEQ1", "", "") stup, err := reg.LookupTableName("SEQ1") require.Nil(err) - assert.Equal(seq1, stup) + assert.Equal(seq1, stup) // When `export data` restarts, the registry should be reloaded from the file. reg = newNameRegistry("") err = reg.Init() require.Nil(err) - assert.Equal(ORACLE, reg.SourceDBType) + assert.Equal(constants.ORACLE, reg.SourceDBType) assert.Equal("SAKILA", reg.DefaultSourceDBSchemaName) assert.Equal(sourceNamesMap, reg.SourceDBTableNames) ntup, err = reg.LookupTableName("TABLE1") @@ -549,3 +570,132 @@ func TestNameRegistryWithDummyDBs(t *testing.T) { assert.Equal(table1, ntup) assert.Equal(`SAKILA_FF."TABLE1"`, table1.ForUserQuery()) } + +// Unit tests for breaking changes in NameRegistry. 
+ +func TestNameRegistryStructs(t *testing.T) { + + tests := []struct { + name string + actualType reflect.Type + expectedType interface{} + }{ + { + name: "Validate NameRegistryParams Struct Definition", + actualType: reflect.TypeOf(NameRegistryParams{}), + expectedType: struct { + FilePath string + Role string + SourceDBType string + SourceDBSchema string + SourceDBName string + SDB SourceDBInterface + TargetDBSchema string + YBDB YBDBInterface + }{}, + }, + { + name: "Validate NameRegistry Struct Definition", + actualType: reflect.TypeOf(NameRegistry{}), + expectedType: struct { + SourceDBType string + SourceDBSchemaNames []string + DefaultSourceDBSchemaName string + SourceDBTableNames map[string][]string + YBSchemaNames []string + DefaultYBSchemaName string + YBTableNames map[string][]string + DefaultSourceReplicaDBSchemaName string + params NameRegistryParams + }{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testutils.CompareStructs(t, tt.actualType, reflect.TypeOf(tt.expectedType), tt.name) + }) + } +} + +func TestNameRegistryJson(t *testing.T) { + exportDir := filepath.Join(os.TempDir(), "namereg") + outputFilePath := filepath.Join(exportDir, "test_dummy_name_registry.json") + + // Create a sample NameRegistry instance + reg := &NameRegistry{ + SourceDBType: constants.ORACLE, + params: NameRegistryParams{ + FilePath: outputFilePath, + Role: TARGET_DB_IMPORTER_ROLE, + SourceDBType: constants.ORACLE, + SourceDBSchema: "SAKILA", + SourceDBName: "ORCLPDB1", + TargetDBSchema: "ybsakila", + }, + SourceDBSchemaNames: []string{"SAKILA"}, + DefaultSourceDBSchemaName: "SAKILA", + SourceDBTableNames: map[string][]string{ + "SAKILA": {"TABLE1", "TABLE2", "MixedCaps", "lower_caps"}, + }, + YBSchemaNames: []string{"ybsakila"}, + DefaultYBSchemaName: "ybsakila", + YBTableNames: map[string][]string{ + "ybsakila": {"table1", "table2", "mixedcaps", "lower_caps"}, + }, + DefaultSourceReplicaDBSchemaName: "SAKILA_FF", + } + + // Ensure the 
export directory exists + if err := os.MkdirAll(exportDir, 0755); err != nil { + t.Fatalf("Failed to create export directory: %v", err) + } + + // Clean up the export directory + defer func() { + if err := os.RemoveAll(exportDir); err != nil { + t.Fatalf("Failed to remove export directory: %v", err) + } + }() + + // Marshal the NameRegistry instance to JSON + err := reg.save() + if err != nil { + t.Fatalf("Failed to save NameRegistry to JSON: %v", err) + } + + // TODO: Use a single string instead of a slice of strings for the expected JSON + expectedJSON := strings.Join([]string{ + "{", + ` "SourceDBType": "oracle",`, + ` "SourceDBSchemaNames": [`, + ` "SAKILA"`, + " ],", + ` "DefaultSourceDBSchemaName": "SAKILA",`, + ` "SourceDBTableNames": {`, + ` "SAKILA": [`, + ` "TABLE1",`, + ` "TABLE2",`, + ` "MixedCaps",`, + ` "lower_caps"`, + " ]", + " },", + ` "YBSchemaNames": [`, + ` "ybsakila"`, + " ],", + ` "DefaultYBSchemaName": "ybsakila",`, + ` "YBTableNames": {`, + ` "ybsakila": [`, + ` "table1",`, + ` "table2",`, + ` "mixedcaps",`, + ` "lower_caps"`, + " ]", + " },", + ` "DefaultSourceReplicaDBSchemaName": "SAKILA_FF"`, + "}", + }, "\n") + + // Read the JSON file and compare it with the expected JSON + testutils.CompareJson(t, outputFilePath, expectedJSON, exportDir) +} diff --git a/yb-voyager/src/query/queryissue/constants.go b/yb-voyager/src/query/queryissue/constants.go new file mode 100644 index 0000000000..80cce2e1da --- /dev/null +++ b/yb-voyager/src/query/queryissue/constants.go @@ -0,0 +1,105 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queryissue + +// Types +const ( + REFERENCED_TYPE_DECLARATION = "REFERENCED_TYPE_DECLARATION" + STORED_GENERATED_COLUMNS = "STORED_GENERATED_COLUMNS" + UNLOGGED_TABLE = "UNLOGGED_TABLE" + UNSUPPORTED_INDEX_METHOD = "UNSUPPORTED_INDEX_METHOD" + STORAGE_PARAMETER = "STORAGE_PARAMETER" + ALTER_TABLE_SET_COLUMN_ATTRIBUTE = "ALTER_TABLE_SET_COLUMN_ATTRIBUTE" + ALTER_TABLE_CLUSTER_ON = "ALTER_TABLE_CLUSTER_ON" + ALTER_TABLE_DISABLE_RULE = "ALTER_TABLE_DISABLE_RULE" + EXCLUSION_CONSTRAINTS = "EXCLUSION_CONSTRAINTS" + DEFERRABLE_CONSTRAINTS = "DEFERRABLE_CONSTRAINTS" + MULTI_COLUMN_GIN_INDEX = "MULTI_COLUMN_GIN_INDEX" + ORDERED_GIN_INDEX = "ORDERED_GIN_INDEX" + POLICY_WITH_ROLES = "POLICY_WITH_ROLES" + CONSTRAINT_TRIGGER = "CONSTRAINT_TRIGGER" + REFERENCING_CLAUSE_IN_TRIGGER = "REFERENCING_CLAUSE_IN_TRIGGER" + BEFORE_ROW_TRIGGER_ON_PARTITIONED_TABLE = "BEFORE_ROW_TRIGGER_ON_PARTITIONED_TABLE" + ALTER_TABLE_ADD_PK_ON_PARTITIONED_TABLE = "ALTER_TABLE_ADD_PK_ON_PARTITIONED_TABLE" + EXPRESSION_PARTITION_WITH_PK_UK = "EXPRESSION_PARTITION_WITH_PK_UK" + MULTI_COLUMN_LIST_PARTITION = "MULTI_COLUMN_LIST_PARTITION" + INSUFFICIENT_COLUMNS_IN_PK_FOR_PARTITION = "INSUFFICIENT_COLUMNS_IN_PK_FOR_PARTITION" + XML_DATATYPE = "XML_DATATYPE" + XID_DATATYPE = "XID_DATATYPE" + POSTGIS_DATATYPES = "POSTGIS_DATATYPES" + UNSUPPORTED_DATATYPES = "UNSUPPORTED_TYPES" + UNSUPPORTED_DATATYPES_LIVE_MIGRATION = "UNSUPPORTED_DATATYPES_LIVE_MIGRATION" + UNSUPPORTED_DATATYPES_LIVE_MIGRATION_WITH_FF_FB = "UNSUPPORTED_DATATYPES_LIVE_MIGRATION_WITH_FF_FB" + PK_UK_ON_COMPLEX_DATATYPE = "PK_UK_ON_COMPLEX_DATATYPE" + INDEX_ON_COMPLEX_DATATYPE = "INDEX_ON_COMPLEX_DATATYPE" + FOREIGN_TABLE = "FOREIGN_TABLE" + INHERITANCE = "INHERITANCE" + + AGGREGATE_FUNCTION = "AGGREGATE_FUNCTION" + AGGREGATION_FUNCTIONS_NAME = "Aggregate Functions" + JSON_TYPE_PREDICATE = "JSON_TYPE_PREDICATE" + 
JSON_TYPE_PREDICATE_NAME = "Json Type Predicate" + JSON_CONSTRUCTOR_FUNCTION = "JSON_CONSTRUCTOR_FUNCTION" + JSON_CONSTRUCTOR_FUNCTION_NAME = "Json Constructor Functions" + JSON_QUERY_FUNCTION = "JSON_QUERY_FUNCTION" + JSON_QUERY_FUNCTIONS_NAME = "Json Query Functions" + LARGE_OBJECT_DATATYPE = "LARGE_OBJECT_DATATYPE" + LARGE_OBJECT_FUNCTIONS = "LARGE_OBJECT_FUNCTIONS" + LARGE_OBJECT_FUNCTIONS_NAME = "Large Object Functions" + + SECURITY_INVOKER_VIEWS = "SECURITY_INVOKER_VIEWS" + SECURITY_INVOKER_VIEWS_NAME = "Security Invoker Views" + + ADVISORY_LOCKS = "ADVISORY_LOCKS" + SYSTEM_COLUMNS = "SYSTEM_COLUMNS" + XML_FUNCTIONS = "XML_FUNCTIONS" + ADVISORY_LOCKS_NAME = "Advisory Locks" + SYSTEM_COLUMNS_NAME = "System Columns" + XML_FUNCTIONS_NAME = "XML Functions" + FETCH_WITH_TIES = "FETCH_WITH_TIES" + REGEX_FUNCTIONS = "REGEX_FUNCTIONS" + + JSONB_SUBSCRIPTING = "JSONB_SUBSCRIPTING" + JSONB_SUBSCRIPTING_NAME = "Jsonb Subscripting" + MULTI_RANGE_DATATYPE = "MULTI_RANGE_DATATYPE" + COPY_FROM_WHERE = "COPY FROM ... WHERE" + COPY_ON_ERROR = "COPY ... 
ON_ERROR" + + DETERMINISTIC_OPTION_WITH_COLLATION = "DETERMINISTIC_OPTION_WITH_COLLATION" + DETERMINISTIC_OPTION_WITH_COLLATION_NAME = "Deterministic attribute in collation" + + MERGE_STATEMENT = "MERGE_STATEMENT" + MERGE_STATEMENT_NAME = "Merge Statement" + FOREIGN_KEY_REFERENCES_PARTITIONED_TABLE = "FOREIGN_KEY_REFERENCED_PARTITIONED_TABLE" + FOREIGN_KEY_REFERENCES_PARTITIONED_TABLE_NAME = "Foreign key constraint references partitioned table" + + UNIQUE_NULLS_NOT_DISTINCT = "UNIQUE_NULLS_NOT_DISTINCT" + UNIQUE_NULLS_NOT_DISTINCT_NAME = "Unique Nulls Not Distinct" +) + +// Object types +const ( + CONSTRAINT_NAME = "ConstraintName" + FUNCTION_NAMES = "FunctionNames" + TABLE_OBJECT_TYPE = "TABLE" + FOREIGN_TABLE_OBJECT_TYPE = "FOREIGN TABLE" + FUNCTION_OBJECT_TYPE = "FUNCTION" + INDEX_OBJECT_TYPE = "INDEX" + POLICY_OBJECT_TYPE = "POLICY" + TRIGGER_OBJECT_TYPE = "TRIGGER" + DML_QUERY_OBJECT_TYPE = "DML_QUERY" +) diff --git a/yb-voyager/src/query/queryissue/detectors.go b/yb-voyager/src/query/queryissue/detectors.go new file mode 100644 index 0000000000..c62f473edb --- /dev/null +++ b/yb-voyager/src/query/queryissue/detectors.go @@ -0,0 +1,556 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package queryissue + +import ( + "slices" + + mapset "github.com/deckarep/golang-set/v2" + log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/query/queryparser" +) + +// To Add a new unsupported query construct implement this interface for all possible nodes for that construct +// each detector will work on specific type of node +type UnsupportedConstructDetector interface { + Detect(msg protoreflect.Message) error + GetIssues() []QueryIssue +} + +type FuncCallDetector struct { + query string + + advisoryLocksFuncsDetected mapset.Set[string] + xmlFuncsDetected mapset.Set[string] + aggFuncsDetected mapset.Set[string] + regexFuncsDetected mapset.Set[string] + loFuncsDetected mapset.Set[string] +} + +func NewFuncCallDetector(query string) *FuncCallDetector { + return &FuncCallDetector{ + query: query, + advisoryLocksFuncsDetected: mapset.NewThreadUnsafeSet[string](), + xmlFuncsDetected: mapset.NewThreadUnsafeSet[string](), + aggFuncsDetected: mapset.NewThreadUnsafeSet[string](), + regexFuncsDetected: mapset.NewThreadUnsafeSet[string](), + loFuncsDetected: mapset.NewThreadUnsafeSet[string](), + } +} + +// Detect checks if a FuncCall node uses an unsupported function. 
+func (d *FuncCallDetector) Detect(msg protoreflect.Message) error { + if queryparser.GetMsgFullName(msg) != queryparser.PG_QUERY_FUNCCALL_NODE { + return nil + } + + _, funcName := queryparser.GetFuncNameFromFuncCall(msg) + log.Debugf("fetched function name from %s node: %q", queryparser.PG_QUERY_FUNCCALL_NODE, funcName) + + if unsupportedAdvLockFuncs.ContainsOne(funcName) { + d.advisoryLocksFuncsDetected.Add(funcName) + } + if unsupportedXmlFunctions.ContainsOne(funcName) { + d.xmlFuncsDetected.Add(funcName) + } + if unsupportedRegexFunctions.ContainsOne(funcName) { + d.regexFuncsDetected.Add(funcName) + } + + if unsupportedAggFunctions.ContainsOne(funcName) { + d.aggFuncsDetected.Add(funcName) + } + if unsupportedLargeObjectFunctions.ContainsOne(funcName) { + d.loFuncsDetected.Add(funcName) + } + + return nil +} + +func (d *FuncCallDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if d.advisoryLocksFuncsDetected.Cardinality() > 0 { + issues = append(issues, NewAdvisoryLocksIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + if d.xmlFuncsDetected.Cardinality() > 0 { + issues = append(issues, NewXmlFunctionsIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + if d.aggFuncsDetected.Cardinality() > 0 { + issues = append(issues, NewAggregationFunctionIssue(DML_QUERY_OBJECT_TYPE, "", d.query, d.aggFuncsDetected.ToSlice())) + } + if d.regexFuncsDetected.Cardinality() > 0 { + issues = append(issues, NewRegexFunctionsIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + if d.loFuncsDetected.Cardinality() > 0 { + issues = append(issues, NewLOFuntionsIssue(DML_QUERY_OBJECT_TYPE, "", d.query, d.loFuncsDetected.ToSlice())) + } + return issues +} + +type ColumnRefDetector struct { + query string + unsupportedSystemColumnsDetected mapset.Set[string] +} + +func NewColumnRefDetector(query string) *ColumnRefDetector { + return &ColumnRefDetector{ + query: query, + unsupportedSystemColumnsDetected: mapset.NewThreadUnsafeSet[string](), + } +} + +// Detect checks if a ColumnRef 
node uses an unsupported system column +func (d *ColumnRefDetector) Detect(msg protoreflect.Message) error { + if queryparser.GetMsgFullName(msg) != queryparser.PG_QUERY_COLUMNREF_NODE { + return nil + } + + _, colName := queryparser.GetColNameFromColumnRef(msg) + log.Debugf("fetched column name from %s node: %q", queryparser.PG_QUERY_COLUMNREF_NODE, colName) + + if unsupportedSysCols.ContainsOne(colName) { + d.unsupportedSystemColumnsDetected.Add(colName) + } + return nil +} + +func (d *ColumnRefDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if d.unsupportedSystemColumnsDetected.Cardinality() > 0 { + issues = append(issues, NewSystemColumnsIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + return issues +} + +type XmlExprDetector struct { + query string + detected bool +} + +func NewXmlExprDetector(query string) *XmlExprDetector { + return &XmlExprDetector{ + query: query, + } +} + +// Detect checks if a XmlExpr node is present, means Xml type/functions are used +func (d *XmlExprDetector) Detect(msg protoreflect.Message) error { + if queryparser.GetMsgFullName(msg) == queryparser.PG_QUERY_XMLEXPR_NODE { + d.detected = true + } + return nil +} + +func (d *XmlExprDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if d.detected { + issues = append(issues, NewXmlFunctionsIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + return issues +} + +/* +RangeTableFunc node manages functions that produce tables, structuring output into rows and columns +for SQL queries. Example: XMLTABLE() + +ASSUMPTION: +- RangeTableFunc is used for representing XMLTABLE() only as of now +- Comments from Postgres code: + - RangeTableFunc - raw form of "table functions" such as XMLTABLE + - Note: JSON_TABLE is also a "table function", but it uses JsonTable node, + - not RangeTableFunc. 
+ +- link: https://github.com/postgres/postgres/blob/ea792bfd93ab8ad4ef4e3d1a741b8595db143677/src/include/nodes/parsenodes.h#L651 +*/ +type RangeTableFuncDetector struct { + query string + detected bool +} + +func NewRangeTableFuncDetector(query string) *RangeTableFuncDetector { + return &RangeTableFuncDetector{ + query: query, + } +} + +// Detect checks if a RangeTableFunc node is present for a XMLTABLE() function +func (d *RangeTableFuncDetector) Detect(msg protoreflect.Message) error { + if queryparser.GetMsgFullName(msg) == queryparser.PG_QUERY_RANGETABLEFUNC_NODE { + if queryparser.IsXMLTable(msg) { + d.detected = true + } + } + return nil +} + +func (d *RangeTableFuncDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if d.detected { + issues = append(issues, NewXmlFunctionsIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + return issues +} + +type JsonbSubscriptingDetector struct { + query string + jsonbColumns []string + detected bool + jsonbFunctions []string +} + +func NewJsonbSubscriptingDetector(query string, jsonbColumns []string, jsonbFunctions []string) *JsonbSubscriptingDetector { + return &JsonbSubscriptingDetector{ + query: query, + jsonbColumns: jsonbColumns, + jsonbFunctions: jsonbFunctions, + } +} + +func (j *JsonbSubscriptingDetector) Detect(msg protoreflect.Message) error { + + if queryparser.GetMsgFullName(msg) != queryparser.PG_QUERY_A_INDIRECTION_NODE { + return nil + } + aIndirectionNode, ok := queryparser.GetAIndirectionNode(msg) + if !ok { + return nil + } + + /* + Indirection node is to determine if subscripting is happening in the query e.g. data['name'] - jsonb, numbers[1] - array type, and ('{"a": {"b": {"c": 1}}}'::jsonb)['a']['b']['c']; + Arg is the data on which subscripting is happening e.g data, numbers (columns) and constant data type casted to jsonb ('{"a": {"b": {"c": 1}}}'::jsonb) + Indices are the actual fields that are being accessed while subscripting or the index in case of array type e.g. name, 1, a, b etc. 
+ So we are checking the arg is of jsonb type here + */ + arg := aIndirectionNode.GetArg() + if arg == nil { + return nil + } + /* + Caveats - + + Still with this approach we won't be able to cover all cases e.g. + + select ab_data['name'] from (select Data as ab_data from test_jsonb);`, + + parseTree - stmts:{stmt:{select_stmt:{target_list:{res_target:{val:{a_indirection:{arg:{column_ref:{fields:{string:{sval:"ab_data"}} location:9}} + indirection:{a_indices:{uidx:{a_const:{sval:{sval:"name"} location:17}}}}}} location:9}} from_clause:{range_subselect:{subquery:{select_stmt:{ + target_list:{res_target:{name:"ab_data" val:{column_ref:{fields:{string:{sval:"data"}} location:38}} location:38}} + from_clause:{range_var:{relname:"test_jsonb" inh:true relpersistence:"p" location:59}} limit_option:LIMIT_OPTION_DEFAULT op:SETOP_NONE}}}} + limit_option:LIMIT_OPTION_DEFAULT op:SETOP_NONE}} + */ + if queryparser.DoesNodeHandleJsonbData(arg, j.jsonbColumns, j.jsonbFunctions) { + j.detected = true + } + return nil +} + +func (j *JsonbSubscriptingDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if j.detected { + issues = append(issues, NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", j.query)) + } + return issues +} + +type SelectStmtDetector struct { + query string + limitOptionWithTiesDetected bool +} + +func NewSelectStmtDetector(query string) *SelectStmtDetector { + return &SelectStmtDetector{ + query: query, + } +} + +func (d *SelectStmtDetector) Detect(msg protoreflect.Message) error { + if queryparser.GetMsgFullName(msg) == queryparser.PG_QUERY_SELECTSTMT_NODE { + selectStmtNode, err := queryparser.ProtoAsSelectStmt(msg) + if err != nil { + return err + } + // checks if a SelectStmt node uses a FETCH clause with TIES + // https://www.postgresql.org/docs/13/sql-select.html#SQL-LIMIT + if selectStmtNode.LimitOption == queryparser.LIMIT_OPTION_WITH_TIES { + d.limitOptionWithTiesDetected = true + } + } + return nil +} + +func (d *SelectStmtDetector) 
GetIssues() []QueryIssue { + var issues []QueryIssue + if d.limitOptionWithTiesDetected { + issues = append(issues, NewFetchWithTiesIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + return issues +} + +type CopyCommandUnsupportedConstructsDetector struct { + query string + copyFromWhereConstructDetected bool + copyOnErrorConstructDetected bool +} + +func NewCopyCommandUnsupportedConstructsDetector(query string) *CopyCommandUnsupportedConstructsDetector { + return &CopyCommandUnsupportedConstructsDetector{ + query: query, + } +} + +// Detect if COPY command uses unsupported syntax i.e. COPY FROM ... WHERE and COPY... ON_ERROR +func (d *CopyCommandUnsupportedConstructsDetector) Detect(msg protoreflect.Message) error { + // Check if the message is a COPY statement + if msg.Descriptor().FullName() != queryparser.PG_QUERY_COPY_STMT_NODE { + return nil // Not a COPY statement, nothing to detect + } + + // Check for COPY FROM ... WHERE clause + fromField := queryparser.GetBoolField(msg, "is_from") + whereField := queryparser.GetMessageField(msg, "where_clause") + if fromField && whereField != nil { + d.copyFromWhereConstructDetected = true + } + + // Check for COPY ... 
ON_ERROR clause + defNames, err := queryparser.TraverseAndExtractDefNamesFromDefElem(msg) + if err != nil { + log.Errorf("error extracting defnames from COPY statement: %v", err) + } + if slices.Contains(defNames, "on_error") { + d.copyOnErrorConstructDetected = true + } + + return nil +} + +func (d *CopyCommandUnsupportedConstructsDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if d.copyFromWhereConstructDetected { + issues = append(issues, NewCopyFromWhereIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + if d.copyOnErrorConstructDetected { + issues = append(issues, NewCopyOnErrorIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + return issues +} + +type JsonConstructorFuncDetector struct { + query string + unsupportedJsonConstructorFunctionsDetected mapset.Set[string] +} + +func NewJsonConstructorFuncDetector(query string) *JsonConstructorFuncDetector { + return &JsonConstructorFuncDetector{ + query: query, + unsupportedJsonConstructorFunctionsDetected: mapset.NewThreadUnsafeSet[string](), + } +} + +func (j *JsonConstructorFuncDetector) Detect(msg protoreflect.Message) error { + switch queryparser.GetMsgFullName(msg) { + case queryparser.PG_QUERY_JSON_ARRAY_AGG_NODE: + j.unsupportedJsonConstructorFunctionsDetected.Add(JSON_ARRAYAGG) + case queryparser.PG_QUERY_JSON_ARRAY_CONSTRUCTOR_AGG_NODE: + j.unsupportedJsonConstructorFunctionsDetected.Add(JSON_ARRAY) + case queryparser.PG_QUERY_JSON_OBJECT_AGG_NODE: + j.unsupportedJsonConstructorFunctionsDetected.Add(JSON_OBJECTAGG) + case queryparser.PG_QUERY_JSON_OBJECT_CONSTRUCTOR_NODE: + j.unsupportedJsonConstructorFunctionsDetected.Add(JSON_OBJECT) + } + return nil +} + +func (d *JsonConstructorFuncDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if d.unsupportedJsonConstructorFunctionsDetected.Cardinality() > 0 { + issues = append(issues, NewJsonConstructorFunctionIssue(DML_QUERY_OBJECT_TYPE, "", d.query, d.unsupportedJsonConstructorFunctionsDetected.ToSlice())) + } + return issues +} + 
+type JsonQueryFunctionDetector struct { + query string + unsupportedJsonQueryFunctionsDetected mapset.Set[string] +} + +func NewJsonQueryFunctionDetector(query string) *JsonQueryFunctionDetector { + return &JsonQueryFunctionDetector{ + query: query, + unsupportedJsonQueryFunctionsDetected: mapset.NewThreadUnsafeSet[string](), + } +} + +func (j *JsonQueryFunctionDetector) Detect(msg protoreflect.Message) error { + if queryparser.GetMsgFullName(msg) == queryparser.PG_QUERY_JSON_TABLE_NODE { + /* + SELECT * FROM json_table( + '[{"a":10,"b":20},{"a":30,"b":40}]'::jsonb, + '$[*]' + COLUMNS ( + column_a int4 path '$.a', + column_b int4 path '$.b' + ) + ); + stmts:{stmt:{select_stmt:{target_list:{res_target:{val:{column_ref:{fields:{a_star:{}} location:530}} location:530}} + from_clause:{json_table:{context_item:{raw_expr:{type_cast:{arg:{a_const:{sval:{sval:"[{\"a\":10,\"b\":20},{\"a\":30,\"b\":40}]"} + location:553}} type_name:{names:{string:{sval:"jsonb"}} ..... name_location:-1 location:601} + columns:{json_table_column:{coltype:JTC_REGULAR name:"column_a" type_name:{names:{string:{sval:"int4"}} typemod:-1 location:639} + pathspec:{string:{a_const:{sval:{sval:"$.a"} location:649}} name_location:-1 location:649} ... + */ + j.unsupportedJsonQueryFunctionsDetected.Add(JSON_TABLE) + return nil + } + if queryparser.GetMsgFullName(msg) != queryparser.PG_QUERY_JSON_FUNC_EXPR_NODE { + return nil + } + /* + JsonExprOp - + enumeration of SQL/JSON query function types + typedef enum JsonExprOp + { + 1. JSON_EXISTS_OP, JSON_EXISTS() + 2. JSON_QUERY_OP, JSON_QUERY() + 3. JSON_VALUE_OP, JSON_VALUE() + 4. 
JSON_TABLE_OP, JSON_TABLE() + } JsonExprOp; + */ + jsonExprFuncOpNum := queryparser.GetEnumNumField(msg, "op") + switch jsonExprFuncOpNum { + case 1: + j.unsupportedJsonQueryFunctionsDetected.Add(JSON_EXISTS) + case 2: + j.unsupportedJsonQueryFunctionsDetected.Add(JSON_QUERY) + case 3: + j.unsupportedJsonQueryFunctionsDetected.Add(JSON_VALUE) + case 4: + j.unsupportedJsonQueryFunctionsDetected.Add(JSON_TABLE) + } + return nil +} + +func (d *JsonQueryFunctionDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if d.unsupportedJsonQueryFunctionsDetected.Cardinality() > 0 { + issues = append(issues, NewJsonQueryFunctionIssue(DML_QUERY_OBJECT_TYPE, "", d.query, d.unsupportedJsonQueryFunctionsDetected.ToSlice())) + } + return issues +} + +type MergeStatementDetector struct { + query string + isMergeStatementDetected bool +} + +func NewMergeStatementDetector(query string) *MergeStatementDetector { + return &MergeStatementDetector{ + query: query, + } +} + +func (m *MergeStatementDetector) Detect(msg protoreflect.Message) error { + if queryparser.GetMsgFullName(msg) == queryparser.PG_QUERY_MERGE_STMT_NODE { + m.isMergeStatementDetected = true + } + return nil + +} + +func (m *MergeStatementDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if m.isMergeStatementDetected { + issues = append(issues, NewMergeStatementIssue(DML_QUERY_OBJECT_TYPE, "", m.query)) + } + return issues +} + +type UniqueNullsNotDistinctDetector struct { + query string + detected bool +} + +func NewUniqueNullsNotDistinctDetector(query string) *UniqueNullsNotDistinctDetector { + return &UniqueNullsNotDistinctDetector{ + query: query, + } +} + +// Detect checks if a unique constraint is defined which has nulls not distinct +func (d *UniqueNullsNotDistinctDetector) Detect(msg protoreflect.Message) error { + if queryparser.GetMsgFullName(msg) == queryparser.PG_QUERY_INDEX_STMT_NODE { + indexStmt, err := queryparser.ProtoAsIndexStmt(msg) + if err != nil { + return err + } + + if 
indexStmt.Unique && indexStmt.NullsNotDistinct { + d.detected = true + } + } else if queryparser.GetMsgFullName(msg) == queryparser.PG_QUERY_CONSTRAINT_NODE { + constraintNode, err := queryparser.ProtoAsTableConstraint(msg) + if err != nil { + return err + } + + if constraintNode.Contype == queryparser.UNIQUE_CONSTR_TYPE && constraintNode.NullsNotDistinct { + d.detected = true + } + } + + return nil +} + +func (d *UniqueNullsNotDistinctDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if d.detected { + issues = append(issues, NewUniqueNullsNotDistinctIssue(DML_QUERY_OBJECT_TYPE, "", d.query)) + } + return issues +} + +type JsonPredicateExprDetector struct { + query string + detected bool +} + +func NewJsonPredicateExprDetector(query string) *JsonPredicateExprDetector { + return &JsonPredicateExprDetector{ + query: query, + } +} +func (j *JsonPredicateExprDetector) Detect(msg protoreflect.Message) error { + if queryparser.GetMsgFullName(msg) == queryparser.PG_QUERY_JSON_IS_PREDICATE_NODE { + /* + SELECT js IS JSON "json?" FROM (VALUES ('123')) foo(js); + stmts:{stmt:{select_stmt:{target_list:{res_target:{val:{column_ref:{fields:{string:{sval:"js"}} location:337}} location:337}} + target_list:{res_target:{name:"json?" val:{json_is_predicate:{expr:{column_ref:{fields:{string:{sval:"js"}} location:341}} + format:{format_type:JS_FORMAT_DEFAULT encoding:JS_ENC_DEFAULT location:-1} item_type:JS_TYPE_ANY location:341}} location:341}} ... 
+ */ + j.detected = true + } + return nil +} + +func (j *JsonPredicateExprDetector) GetIssues() []QueryIssue { + var issues []QueryIssue + if j.detected { + issues = append(issues, NewJsonPredicateIssue(DML_QUERY_OBJECT_TYPE, "", j.query)) + } + return issues +} diff --git a/yb-voyager/src/query/queryissue/detectors_ddl.go b/yb-voyager/src/query/queryissue/detectors_ddl.go new file mode 100644 index 0000000000..a5259aaeb6 --- /dev/null +++ b/yb-voyager/src/query/queryissue/detectors_ddl.go @@ -0,0 +1,680 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queryissue + +import ( + "fmt" + "slices" + + "github.com/samber/lo" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/query/queryparser" + "github.com/yugabyte/yb-voyager/yb-voyager/src/srcdb" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" +) + +// DDLIssueDetector interface defines methods for detecting issues in DDL objects +type DDLIssueDetector interface { + DetectIssues(queryparser.DDLObject) ([]QueryIssue, error) +} + +//=============TABLE ISSUE DETECTOR =========================== + +// TableIssueDetector handles detection of table-related issues +type TableIssueDetector struct { + ParserIssueDetector +} + +func (d *TableIssueDetector) DetectIssues(obj queryparser.DDLObject) ([]QueryIssue, error) { + table, ok := obj.(*queryparser.Table) + if !ok { + return nil, fmt.Errorf("invalid object type: expected Table") + } + + var issues []QueryIssue + + // Check for generated columns + if len(table.GeneratedColumns) > 0 { + issues = append(issues, NewGeneratedColumnsIssue( + obj.GetObjectType(), + table.GetObjectName(), + "", // query string + table.GeneratedColumns, + )) + } + + // Check for unlogged table + if table.IsUnlogged { + issues = append(issues, NewUnloggedTableIssue( + obj.GetObjectType(), + table.GetObjectName(), + "", // query string + )) + } + + if table.IsInherited { + issues = append(issues, NewInheritanceIssue( + obj.GetObjectType(), + table.GetObjectName(), + "", + )) + } + + if len(table.Constraints) > 0 { + + for _, c := range table.Constraints { + if c.ConstraintType == queryparser.EXCLUSION_CONSTR_TYPE { + issues = append(issues, NewExclusionConstraintIssue( + obj.GetObjectType(), + table.GetObjectName(), + "", + c.ConstraintName, + )) + } + + if c.ConstraintType != queryparser.FOREIGN_CONSTR_TYPE && c.IsDeferrable { + issues = append(issues, NewDeferrableConstraintIssue( + obj.GetObjectType(), + table.GetObjectName(), + "", + c.ConstraintName, + )) + } + + if c.ConstraintType == queryparser.FOREIGN_CONSTR_TYPE && 
d.partitionTablesMap[c.ReferencedTable] { + issues = append(issues, NewForeignKeyReferencesPartitionedTableIssue( + TABLE_OBJECT_TYPE, + table.GetObjectName(), + "", + c.ConstraintName, + )) + } + + if c.IsPrimaryKeyORUniqueConstraint() { + for _, col := range c.Columns { + unsupportedColumnsForTable, ok := d.columnsWithUnsupportedIndexDatatypes[table.GetObjectName()] + if !ok { + break + } + + typeName, ok := unsupportedColumnsForTable[col] + if !ok { + continue + } + issues = append(issues, NewPrimaryOrUniqueConsOnUnsupportedIndexTypesIssue( + obj.GetObjectType(), + table.GetObjectName(), + "", + typeName, + c.ConstraintName, + )) + } + } + } + } + for _, col := range table.Columns { + liveUnsupportedDatatypes := srcdb.GetPGLiveMigrationUnsupportedDatatypes() + liveWithFfOrFbUnsupportedDatatypes := srcdb.GetPGLiveMigrationWithFFOrFBUnsupportedDatatypes() + + isUnsupportedDatatype := utils.ContainsAnyStringFromSlice(srcdb.PostgresUnsupportedDataTypes, col.TypeName) + isUnsupportedDatatypeInLive := utils.ContainsAnyStringFromSlice(liveUnsupportedDatatypes, col.TypeName) + + isUnsupportedDatatypeInLiveWithFFOrFBList := utils.ContainsAnyStringFromSlice(liveWithFfOrFbUnsupportedDatatypes, col.TypeName) + isUDTDatatype := utils.ContainsAnyStringFromSlice(d.compositeTypes, col.GetFullTypeName()) //if type is array + isEnumDatatype := utils.ContainsAnyStringFromSlice(d.enumTypes, col.GetFullTypeName()) //is ENUM type + isArrayOfEnumsDatatype := col.IsArrayType && isEnumDatatype + isUnsupportedDatatypeInLiveWithFFOrFB := isUnsupportedDatatypeInLiveWithFFOrFBList || isUDTDatatype || isArrayOfEnumsDatatype + + if isUnsupportedDatatype { + reportUnsupportedDatatypes(col, obj.GetObjectType(), table.GetObjectName(), &issues) + } else if isUnsupportedDatatypeInLive { + issues = append(issues, NewUnsupportedDatatypesForLMIssue( + obj.GetObjectType(), + table.GetObjectName(), + "", + col.TypeName, + col.ColumnName, + )) + } else if isUnsupportedDatatypeInLiveWithFFOrFB { + 
//reporting only for TABLE Type as we don't deal with FOREIGN TABLE in live migration + reportTypeName := col.GetFullTypeName() + if col.IsArrayType { // For Array cases to make it clear in issue + reportTypeName = fmt.Sprintf("%s[]", reportTypeName) + } + issues = append(issues, NewUnsupportedDatatypesForLMWithFFOrFBIssue( + obj.GetObjectType(), + table.GetObjectName(), + "", + reportTypeName, + col.ColumnName, + )) + } + } + + if table.IsPartitioned { + + /* + 1. Adding PK to Partitioned Table (in cases where ALTER is before create) + 2. Expression partitions are not allowed if PK/UNIQUE columns are there is table + 3. List partition strategy is not allowed with multi-column partitions. + 4. Partition columns should all be included in Primary key set if any on table. + */ + alterAddPk := d.primaryConsInAlter[table.GetObjectName()] + if alterAddPk != nil { + issues = append(issues, NewAlterTableAddPKOnPartiionIssue( + obj.GetObjectType(), + table.GetObjectName(), + alterAddPk.Query, + )) + } + primaryKeyColumns := table.PrimaryKeyColumns() + uniqueKeyColumns := table.UniqueKeyColumns() + + if table.IsExpressionPartition && (len(primaryKeyColumns) > 0 || len(uniqueKeyColumns) > 0) { + issues = append(issues, NewExpressionPartitionIssue( + obj.GetObjectType(), + table.GetObjectName(), + "", + )) + } + + if table.PartitionStrategy == queryparser.LIST_PARTITION && + len(table.PartitionColumns) > 1 { + issues = append(issues, NewMultiColumnListPartition( + obj.GetObjectType(), + table.GetObjectName(), + "", + )) + } + partitionColumnsNotInPK, _ := lo.Difference(table.PartitionColumns, primaryKeyColumns) + if len(primaryKeyColumns) > 0 && len(partitionColumnsNotInPK) > 0 { + issues = append(issues, NewInsufficientColumnInPKForPartition( + obj.GetObjectType(), + table.GetObjectName(), + "", + partitionColumnsNotInPK, + )) + } + } + + return issues, nil +} + +func reportUnsupportedDatatypes(col queryparser.TableColumn, objType string, objName string, issues *[]QueryIssue) 
{ + switch col.TypeName { + case "xml": + *issues = append(*issues, NewXMLDatatypeIssue( + objType, + objName, + "", + col.ColumnName, + )) + case "xid": + *issues = append(*issues, NewXIDDatatypeIssue( + objType, + objName, + "", + col.ColumnName, + )) + case "geometry", "geography", "box2d", "box3d", "topogeometry": + *issues = append(*issues, NewPostGisDatatypeIssue( + objType, + objName, + "", + col.TypeName, + col.ColumnName, + )) + case "lo": + *issues = append(*issues, NewLODatatypeIssue( + objType, + objName, + "", + col.ColumnName, + )) + case "int8multirange", "int4multirange", "datemultirange", "nummultirange", "tsmultirange", "tstzmultirange": + *issues = append(*issues, NewMultiRangeDatatypeIssue( + objType, + objName, + "", + col.TypeName, + col.ColumnName, + )) + default: + *issues = append(*issues, NewUnsupportedDatatypesIssue( + objType, + objName, + "", + col.TypeName, + col.ColumnName, + )) + } +} + +//=============FOREIGN TABLE ISSUE DETECTOR =========================== + +//ForeignTableIssueDetector handles detection Foreign table issues + +type ForeignTableIssueDetector struct{} + +func (f *ForeignTableIssueDetector) DetectIssues(obj queryparser.DDLObject) ([]QueryIssue, error) { + foreignTable, ok := obj.(*queryparser.ForeignTable) + if !ok { + return nil, fmt.Errorf("invalid object type: expected Foreign Table") + } + issues := make([]QueryIssue, 0) + + issues = append(issues, NewForeignTableIssue( + obj.GetObjectType(), + foreignTable.GetObjectName(), + "", + foreignTable.ServerName, + )) + + for _, col := range foreignTable.Columns { + isUnsupportedDatatype := utils.ContainsAnyStringFromSlice(srcdb.PostgresUnsupportedDataTypes, col.TypeName) + if isUnsupportedDatatype { + reportUnsupportedDatatypes(col, obj.GetObjectType(), foreignTable.GetObjectName(), &issues) + } + } + + return issues, nil + +} + +//=============INDEX ISSUE DETECTOR =========================== + +// IndexIssueDetector handles detection of index-related issues +type 
IndexIssueDetector struct { + ParserIssueDetector +} + +func (d *IndexIssueDetector) DetectIssues(obj queryparser.DDLObject) ([]QueryIssue, error) { + index, ok := obj.(*queryparser.Index) + if !ok { + return nil, fmt.Errorf("invalid object type: expected Index") + } + + var issues []QueryIssue + + // Check for unsupported index methods + if slices.Contains(UnsupportedIndexMethods, index.AccessMethod) { + issues = append(issues, NewUnsupportedIndexMethodIssue( + obj.GetObjectType(), + index.GetObjectName(), + "", // query string + index.AccessMethod, + )) + } + + // Check for storage parameters + if index.NumStorageOptions > 0 { + issues = append(issues, NewStorageParameterIssue( + obj.GetObjectType(), + index.GetObjectName(), + "", // query string + )) + } + + //GinVariations + if index.AccessMethod == GIN_ACCESS_METHOD { + if len(index.Params) > 1 { + issues = append(issues, NewMultiColumnGinIndexIssue( + obj.GetObjectType(), + index.GetObjectName(), + "", + )) + } else { + //In case only one Param is there + param := index.Params[0] + if param.SortByOrder != queryparser.DEFAULT_SORTING_ORDER { + issues = append(issues, NewOrderedGinIndexIssue( + obj.GetObjectType(), + index.GetObjectName(), + "", + )) + } + } + } + + //Index on complex datatypes + /* + cases covered + 1. normal index on column with these types + 2. expression index with casting of unsupported column to supported types [No handling as such just to test as colName will not be there] + 3. expression index with casting to unsupported types + 4. normal index on column with UDTs + 5. these type of indexes on different access method like gin etc.. [TODO to explore more, for now not reporting the indexes on anyother access method than btree] + */ + _, ok = d.columnsWithUnsupportedIndexDatatypes[index.GetTableName()] + if ok && index.AccessMethod == BTREE_ACCESS_METHOD { // Right now not reporting any other access method issues with such types. 
+ for _, param := range index.Params { + if param.IsExpression { + isUnsupportedType := slices.Contains(UnsupportedIndexDatatypes, param.ExprCastTypeName) + isUDTType := slices.Contains(d.compositeTypes, param.GetFullExprCastTypeName()) + if param.IsExprCastArrayType { + issues = append(issues, NewIndexOnComplexDatatypesIssue( + obj.GetObjectType(), + index.GetObjectName(), + "", + "array", + )) + } else if isUnsupportedType || isUDTType { + reportTypeName := param.ExprCastTypeName + if isUDTType { + reportTypeName = "user_defined_type" + } + issues = append(issues, NewIndexOnComplexDatatypesIssue( + obj.GetObjectType(), + index.GetObjectName(), + "", + reportTypeName, + )) + } + + } else { + colName := param.ColName + typeName, ok := d.columnsWithUnsupportedIndexDatatypes[index.GetTableName()][colName] + if !ok { + continue + } + issues = append(issues, NewIndexOnComplexDatatypesIssue( + obj.GetObjectType(), + index.GetObjectName(), + "", + typeName, + )) + } + } + } + + return issues, nil +} + +//=============ALTER TABLE ISSUE DETECTOR =========================== + +// AlterTableIssueDetector handles detection of alter table-related issues +type AlterTableIssueDetector struct { + ParserIssueDetector +} + +func (aid *AlterTableIssueDetector) DetectIssues(obj queryparser.DDLObject) ([]QueryIssue, error) { + alter, ok := obj.(*queryparser.AlterTable) + if !ok { + return nil, fmt.Errorf("invalid object type: expected AlterTable") + } + + var issues []QueryIssue + + switch alter.AlterType { + case queryparser.SET_OPTIONS: + if alter.NumSetAttributes > 0 { + issues = append(issues, NewSetColumnAttributeIssue( + obj.GetObjectType(), + alter.GetObjectName(), + "", // query string + )) + } + case queryparser.ADD_CONSTRAINT: + if alter.NumStorageOptions > 0 { + issues = append(issues, NewStorageParameterIssue( + obj.GetObjectType(), + alter.GetObjectName(), + "", // query string + )) + } + if alter.ConstraintType == queryparser.EXCLUSION_CONSTR_TYPE { + issues = 
append(issues, NewExclusionConstraintIssue( + obj.GetObjectType(), + alter.GetObjectName(), + "", + alter.ConstraintName, + )) + } + if alter.ConstraintType != queryparser.FOREIGN_CONSTR_TYPE && alter.IsDeferrable { + issues = append(issues, NewDeferrableConstraintIssue( + obj.GetObjectType(), + alter.GetObjectName(), + "", + alter.ConstraintName, + )) + } + + if alter.ConstraintType == queryparser.FOREIGN_CONSTR_TYPE && + aid.partitionTablesMap[alter.ConstraintReferencedTable] { + //FK constraint references partitioned table + issues = append(issues, NewForeignKeyReferencesPartitionedTableIssue( + TABLE_OBJECT_TYPE, + alter.GetObjectName(), + "", + alter.ConstraintName, + )) + } + + if alter.ConstraintType == queryparser.PRIMARY_CONSTR_TYPE && + aid.partitionTablesMap[alter.GetObjectName()] { + issues = append(issues, NewAlterTableAddPKOnPartiionIssue( + obj.GetObjectType(), + alter.GetObjectName(), + "", + )) + } + + if alter.AddPrimaryKeyOrUniqueCons() { + for _, col := range alter.ConstraintColumns { + unsupportedColumnsForTable, ok := aid.columnsWithUnsupportedIndexDatatypes[alter.GetObjectName()] + if !ok { + break + } + + typeName, ok := unsupportedColumnsForTable[col] + if !ok { + continue + } + issues = append(issues, NewPrimaryOrUniqueConsOnUnsupportedIndexTypesIssue( + obj.GetObjectType(), + alter.GetObjectName(), + "", + typeName, + alter.ConstraintName, + )) + } + + } + case queryparser.DISABLE_RULE: + issues = append(issues, NewAlterTableDisableRuleIssue( + obj.GetObjectType(), + alter.GetObjectName(), + "", // query string + alter.RuleName, + )) + case queryparser.CLUSTER_ON: + issues = append(issues, NewClusterONIssue( + obj.GetObjectType(), + alter.GetObjectName(), + "", // query string + )) + } + + return issues, nil +} + +//=============POLICY ISSUE DETECTOR =========================== + +// PolicyIssueDetector handles detection of Create policy issues +type PolicyIssueDetector struct{} + +func (p *PolicyIssueDetector) DetectIssues(obj 
queryparser.DDLObject) ([]QueryIssue, error) { + policy, ok := obj.(*queryparser.Policy) + if !ok { + return nil, fmt.Errorf("invalid object type: expected Policy") + } + issues := make([]QueryIssue, 0) + if len(policy.RoleNames) > 0 { + issues = append(issues, NewPolicyRoleIssue( + obj.GetObjectType(), + policy.GetObjectName(), + "", + policy.RoleNames, + )) + } + return issues, nil +} + +//=============TRIGGER ISSUE DETECTOR =========================== + +// TriggerIssueDetector handles detection of Create Trigger issues +type TriggerIssueDetector struct { + ParserIssueDetector +} + +func (tid *TriggerIssueDetector) DetectIssues(obj queryparser.DDLObject) ([]QueryIssue, error) { + trigger, ok := obj.(*queryparser.Trigger) + if !ok { + return nil, fmt.Errorf("invalid object type: expected Trigger") + } + issues := make([]QueryIssue, 0) + + if trigger.IsConstraint { + issues = append(issues, NewConstraintTriggerIssue( + obj.GetObjectType(), + trigger.GetObjectName(), + "", + )) + } + + if trigger.NumTransitionRelations > 0 { + issues = append(issues, NewReferencingClauseTrigIssue( + obj.GetObjectType(), + trigger.GetObjectName(), + "", + )) + } + + if trigger.IsBeforeRowTrigger() && tid.partitionTablesMap[trigger.GetTableName()] { + issues = append(issues, NewBeforeRowOnPartitionTableIssue( + obj.GetObjectType(), + trigger.GetObjectName(), + "", + )) + } + + if unsupportedLargeObjectFunctions.ContainsOne(trigger.FuncName) { + //Can't detect trigger func name using the genericIssues's FuncCallDetector + //as trigger execute Func name is not a FuncCall node, its []pg_query.Node + issues = append(issues, NewLOFuntionsIssue( + obj.GetObjectType(), + trigger.GetObjectName(), + "", + []string{trigger.FuncName}, + )) + } + + return issues, nil +} + +// ==============VIEW ISSUE DETECTOR ====================== + +type ViewIssueDetector struct{} + +func (v *ViewIssueDetector) DetectIssues(obj queryparser.DDLObject) ([]QueryIssue, error) { + view, ok := 
obj.(*queryparser.View) + if !ok { + return nil, fmt.Errorf("invalid object type: expected View") + } + var issues []QueryIssue + + if view.SecurityInvoker { + issues = append(issues, NewSecurityInvokerViewIssue(obj.GetObjectType(), obj.GetObjectName(), "")) + } + return issues, nil +} + +// ==============MVIEW ISSUE DETECTOR ====================== + +type MViewIssueDetector struct{} + +func (v *MViewIssueDetector) DetectIssues(obj queryparser.DDLObject) ([]QueryIssue, error) { + return nil, nil +} + +//===============COLLATION ISSUE DETECTOR ==================== + +type CollationIssueDetector struct{} + +func (c *CollationIssueDetector) DetectIssues(obj queryparser.DDLObject) ([]QueryIssue, error) { + collation, ok := obj.(*queryparser.Collation) + if !ok { + return nil, fmt.Errorf("invalid object type: expected Collation") + } + issues := make([]QueryIssue, 0) + if slices.Contains(collation.Options, "deterministic") { + // deterministic attribute is itself not supported in YB either true or false so checking only whether option is present or not + issues = append(issues, NewDeterministicOptionCollationIssue( + collation.GetObjectType(), + collation.GetObjectName(), + "", + )) + } + return issues, nil +} + +//=============NO-OP ISSUE DETECTOR =========================== + +// Need to handle all the cases for which we don't have any issues detector +type NoOpIssueDetector struct{} + +func (n *NoOpIssueDetector) DetectIssues(obj queryparser.DDLObject) ([]QueryIssue, error) { + return nil, nil +} + +func (p *ParserIssueDetector) GetDDLDetector(obj queryparser.DDLObject) (DDLIssueDetector, error) { + switch obj.(type) { + case *queryparser.Table: + return &TableIssueDetector{ + ParserIssueDetector: *p, + }, nil + case *queryparser.Index: + return &IndexIssueDetector{ + ParserIssueDetector: *p, + }, nil + case *queryparser.AlterTable: + return &AlterTableIssueDetector{ + ParserIssueDetector: *p, + }, nil + case *queryparser.Policy: + return &PolicyIssueDetector{}, nil 
+ case *queryparser.Trigger: + return &TriggerIssueDetector{ + ParserIssueDetector: *p, + }, nil + case *queryparser.ForeignTable: + return &ForeignTableIssueDetector{}, nil + case *queryparser.View: + return &ViewIssueDetector{}, nil + case *queryparser.MView: + return &MViewIssueDetector{}, nil + case *queryparser.Collation: + return &CollationIssueDetector{}, nil + default: + return &NoOpIssueDetector{}, nil + } +} + +const ( + GIN_ACCESS_METHOD = "gin" + BTREE_ACCESS_METHOD = "btree" +) diff --git a/yb-voyager/src/query/queryissue/detectors_test.go b/yb-voyager/src/query/queryissue/detectors_test.go new file mode 100644 index 0000000000..35db47c70f --- /dev/null +++ b/yb-voyager/src/query/queryissue/detectors_test.go @@ -0,0 +1,773 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package queryissue + +import ( + "fmt" + "testing" + + mapset "github.com/deckarep/golang-set/v2" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/query/queryparser" +) + +func getDetectorIssues(t *testing.T, detector UnsupportedConstructDetector, sql string) []QueryIssue { + parseResult, err := queryparser.Parse(sql) + assert.NoError(t, err, "Failed to parse SQL: %s", sql) + + visited := make(map[protoreflect.Message]bool) + + processor := func(msg protoreflect.Message) error { + err := detector.Detect(msg) + if err != nil { + return err + } + return nil + } + + parseTreeMsg := queryparser.GetProtoMessageFromParseTree(parseResult) + err = queryparser.TraverseParseTree(parseTreeMsg, visited, processor) + assert.NoError(t, err) + return detector.GetIssues() +} + +func TestFuncCallDetector(t *testing.T) { + advisoryLockSqls := []string{ + `SELECT pg_advisory_lock(100), COUNT(*) FROM cars;`, + `SELECT pg_advisory_lock_shared(100), COUNT(*) FROM cars;`, + `SELECT pg_advisory_unlock_shared(100);`, + `SELECT * FROM (SELECT pg_advisory_xact_lock(200)) AS lock_acquired;`, + `SELECT * FROM (SELECT pg_advisory_xact_lock_shared(200)) AS lock_acquired;`, + `SELECT id, first_name FROM employees WHERE pg_try_advisory_lock(300) IS TRUE;`, + `SELECT id, first_name FROM employees WHERE salary > 400 AND EXISTS (SELECT 1 FROM pg_advisory_lock(500));`, + `SELECT id, first_name FROM employees WHERE pg_try_advisory_lock(600) IS TRUE AND salary > 700;`, + `SELECT pg_try_advisory_lock_shared(1234, 100);`, + `SELECT pg_try_advisory_xact_lock_shared(1,2);`, + `WITH lock_cte AS ( + SELECT pg_advisory_lock(1000) AS lock_acquired + ) + SELECT e.id, e.name + FROM employees e + JOIN lock_cte ON TRUE + WHERE e.department = 'Engineering';`, + `SELECT e.id, e.name + FROM employees e + WHERE EXISTS ( + SELECT 1 + FROM projects p + WHERE p.manager_id = e.id + AND 
pg_try_advisory_lock_shared(p.project_id) + );`, + `SELECT e.id, + CASE + WHEN e.salary > 100000 THEN pg_advisory_lock(e.id) + ELSE pg_advisory_unlock(e.id) + END AS lock_status + FROM employees e;`, + `SELECT e.id, l.lock_status + FROM employees e + JOIN LATERAL ( + SELECT pg_try_advisory_lock(e.id) AS lock_status + ) l ON TRUE + WHERE e.status = 'active';`, + `WITH lock_cte AS ( + SELECT 1 + ) + SELECT e.id, e.name, pg_try_advisory_lock(600) + FROM employees e + JOIN lock_cte ON TRUE + WHERE pg_advisory_unlock(500) = TRUE;`, + `SELECT pg_advisory_unlock_all();`, + } + + loFunctionSqls := []string{ + `UPDATE documents +SET content_oid = lo_import('/path/to/new/file.pdf') +WHERE title = 'Sample Document';`, + `INSERT INTO documents (title, content_oid) +VALUES ('Sample Document', lo_import('/path/to/your/file.pdf'));`, + `SELECT lo_export(content_oid, '/path/to/exported_design_document.pdf') +FROM documents +WHERE title = 'Design Document';`, + `SELECT lo_create('32142');`, + `SELECT lo_unlink(loid);`, + `SELECT lo_unlink((SELECT content_oid FROM documents WHERE title = 'Sample Document'));`, + `create table test_lo_default (id int, raster lo DEFAULT lo_import('3242'));`, + } + for _, sql := range advisoryLockSqls { + + issues := getDetectorIssues(t, NewFuncCallDetector(sql), sql) + assert.Equal(t, 1, len(issues), "Expected 1 issue for SQL: %s", sql) + assert.Equal(t, ADVISORY_LOCKS, issues[0].Type, "Expected Advisory Locks issue for SQL: %s", sql) + } + + for _, sql := range loFunctionSqls { + issues := getDetectorIssues(t, NewFuncCallDetector(sql), sql) + assert.Equal(t, len(issues), 1) + assert.Equal(t, issues[0].Type, LARGE_OBJECT_FUNCTIONS, "Large Objects not detected in SQL: %s", sql) + + } + +} + +func TestColumnRefDetector(t *testing.T) { + systemColumnSqls := []string{ + `SELECT xmin, xmax FROM employees;`, + `SELECT * FROM (SELECT * FROM employees WHERE xmin = 100) AS version_info;`, + `SELECT * FROM (SELECT xmin, xmax FROM employees) AS version_info;`, + 
`SELECT * FROM employees WHERE xmin = 200;`, + `SELECT * FROM employees WHERE 1 = 1 AND xmax = 300;`, + `SELECT cmin + FROM employees;`, + `SELECT cmax + FROM employees;`, + `SELECT ctid, tableoid, xmin, xmax, cmin, cmax + FROM employees;`, + `WITH versioned_employees AS ( + SELECT *, xmin, xmax + FROM employees + ) + SELECT ve1.id, ve2.id + FROM versioned_employees ve1 + JOIN versioned_employees ve2 ON ve1.xmin = ve2.xmax + WHERE ve1.id <> ve2.id;`, + `SELECT e.id, e.name, + ROW_NUMBER() OVER (ORDER BY e.ctid) AS row_num + FROM employees e;`, + `SELECT * + FROM employees e + WHERE e.xmax = ( + SELECT MAX(xmax) + FROM employees + WHERE department = e.department + );`, + `UPDATE employees + SET salary = salary * 1.05 + WHERE department = 'Sales' + RETURNING id, xmax;`, + `SELECT xmin, COUNT(*) + FROM employees + GROUP BY xmin + HAVING COUNT(*) > 1;`, + } + + for _, sql := range systemColumnSqls { + issues := getDetectorIssues(t, NewColumnRefDetector(sql), sql) + + assert.Equal(t, 1, len(issues), "Expected 1 issue for SQL: %s", sql) + assert.Equal(t, SYSTEM_COLUMNS, issues[0].Type, "Expected System Columns issue for SQL: %s", sql) + } +} + +func TestRangeTableFuncDetector(t *testing.T) { + xmlTableSqls := []string{ + // Test Case 1: Simple XMLTABLE usage with basic columns + `SELECT + p.id, + x.product_id, + x.product_name, + x.price + FROM + products_basic p, + XMLTABLE( + '//Product' + PASSING p.data + COLUMNS + product_id TEXT PATH 'ID', + product_name TEXT PATH 'Name', + price NUMERIC PATH 'Price' + ) AS x;`, + + // Test Case 2: XMLTABLE with CROSS JOIN LATERAL + `SELECT + o.order_id, + items.product, + items.quantity::INT + FROM + orders_lateral o + CROSS JOIN LATERAL XMLTABLE( + '/order/item' + PASSING o.order_details + COLUMNS + product TEXT PATH 'product', + quantity TEXT PATH 'quantity' + ) AS items;`, + + // Test Case 3: XMLTABLE within a Common Table Expression (CTE) + `WITH xml_data AS ( + SELECT id, xml_column FROM xml_documents_cte + ) + SELECT + xd.id, 
+ e.emp_id, + e.name, + e.department + FROM + xml_data xd, + XMLTABLE( + '//Employee' + PASSING xd.xml_column + COLUMNS + emp_id INT PATH 'ID', + name TEXT PATH 'Name', + department TEXT PATH 'Department' + ) AS e;`, + + // Test Case 4: Nested XMLTABLEs for handling hierarchical XML structures + `SELECT + s.section_name, + b.title, + b.author + FROM + library_nested l, + XMLTABLE( + '/library/section' + PASSING l.lib_data + COLUMNS + section_name TEXT PATH '@name', + books XML PATH 'book' + ) AS s, + XMLTABLE( + '/book' + PASSING s.books + COLUMNS + title TEXT PATH 'title', + author TEXT PATH 'author' + ) AS b;`, + + // Test Case 5: XMLTABLE with XML namespaces + `SELECT + x.emp_name, + x.position, + x.city, + x.country + FROM + employees_ns, + XMLTABLE( + XMLNAMESPACES ( + 'http://example.com/emp' AS emp, + 'http://example.com/address' AS addr + ), + '/emp:Employee' -- Using the emp namespace prefix + PASSING employees_ns.emp_data + COLUMNS + emp_name TEXT PATH 'emp:Name', -- Using emp prefix + position TEXT PATH 'emp:Position', -- Using emp prefix + city TEXT PATH 'addr:Address/addr:City', + country TEXT PATH 'addr:Address/addr:Country' + ) AS x;`, + + // Test Case 6: XMLTABLE used within a VIEW creation + `CREATE VIEW order_items_view AS + SELECT + o.order_id, + o.customer_name, + items.product, + items.quantity::INT + FROM + orders_view o, + XMLTABLE( + '/order/item' + PASSING o.order_details + COLUMNS + product TEXT PATH 'product', + quantity TEXT PATH 'quantity' + ) AS items;`, + `CREATE VIEW public.order_items_view AS + SELECT o.order_id, + o.customer_name, + items.product, + (items.quantity)::integer AS quantity + FROM public.orders_view o, + LATERAL XMLTABLE(('/order/item'::text) PASSING (o.order_details) COLUMNS product text PATH ('product'::text), quantity text PATH ('quantity'::text)) items;`, + + // Test Case 7: XMLTABLE with aggregation functions + `SELECT + s.report_id, + SUM(t.amount::NUMERIC) AS total_sales + FROM + sales_reports_nested s, + 
XMLTABLE( + '/sales/transaction' + PASSING s.sales_data + COLUMNS + amount TEXT PATH 'amount' + ) AS t + GROUP BY + s.report_id;`, + + // Test Case 8: Nested XMLTABLE() with subqueries + `SELECT + s.report_id, + SUM(t.amount::NUMERIC) AS total_sales + FROM + sales_reports_complex s, + XMLTABLE( + '/sales/transaction' + PASSING s.sales_data + COLUMNS + transaction_id INT PATH '@id', + transaction_details XML PATH 'details' + ) AS t, + XMLTABLE( + '/transaction/detail' + PASSING t.transaction_details + COLUMNS + amount TEXT PATH 'amount' + ) AS detail + GROUP BY + s.report_id;`, + } + + for _, sql := range xmlTableSqls { + issues := getDetectorIssues(t, NewRangeTableFuncDetector(sql), sql) + + assert.Equal(t, 1, len(issues), "Expected 1 issue for SQL: %s", sql) + assert.Equal(t, XML_FUNCTIONS, issues[0].Type, "Expected XML Functions issue for SQL: %s", sql) + } +} + +// TestXmlExprDetector tests the XML Function Detection - FuncCallDetector, XMLExprDetector, RangeTableFuncDetector. +func TestXMLFunctionsDetectors(t *testing.T) { + xmlFunctionSqls := []string{ + `SELECT id, xmlelement(name "employee", name) AS employee_data FROM employees;`, + `SELECT id, xpath('/person/name/text()', data) AS name FROM xml_example;`, + `SELECT id FROM employees WHERE xmlexists('/id' PASSING BY VALUE xmlcolumn);`, + `SELECT e.id, x.employee_xml + FROM employees e + JOIN ( + SELECT xmlelement(name "employee", xmlattributes(e.id AS "id"), e.name) AS employee_xml + FROM employees e + ) x ON x.employee_xml IS NOT NULL + WHERE xmlexists('//employee[name="John Doe"]' PASSING BY REF x.employee_xml);`, + `WITH xml_data AS ( + SELECT + id, + xml_column, + xpath('/root/element/@attribute', xml_column) as xpath_result + FROM xml_documents + ) + SELECT + x.id, + (xt.value).text as value + FROM + xml_data x + CROSS JOIN LATERAL unnest(x.xpath_result) as xt(value);`, + `SELECT e.id, e.name + FROM employees e + WHERE CASE + WHEN e.department = 'IT' THEN xmlexists('//access[@level="high"]' PASSING 
e.permissions) + ELSE FALSE + END;`, + `SELECT xmlserialize( + content xmlelement(name "employees", + xmlagg( + xmlelement(name "employee", + xmlattributes(e.id AS "id"), + e.name + ) + ) + ) AS CLOB + ) AS employees_xml + FROM employees e + WHERE e.status = 'active';`, + `CREATE VIEW employee_xml_view AS + SELECT e.id, + xmlelement(name "employee", + xmlattributes(e.id AS "id"), + e.name, + e.department + ) AS employee_xml + FROM employees e;`, + `SELECT xmltext('Widget') AS inventory_text + FROM inventory + WHERE id = 5;`, + `SELECT xmlforest(name, department) AS employee_info + FROM employees + WHERE id = 4;`, + `SELECT xmltable.* + FROM xmldata, + XMLTABLE('//ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + ordinality FOR ORDINALITY, + "COUNTRY_NAME" text, + country_id text PATH 'COUNTRY_ID', + size_sq_km float PATH 'SIZE[@unit = "sq_km"]', + size_other text PATH + 'concat(SIZE[@unit!="sq_km"], " ", SIZE[@unit!="sq_km"]/@unit)', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');`, + `SELECT xmltable.* + FROM XMLTABLE(XMLNAMESPACES('http://example.com/myns' AS x, + 'http://example.com/b' AS "B"), + '/x:example/x:item' + PASSING (SELECT data FROM xmldata) + COLUMNS foo int PATH '@foo', + bar int PATH '@B:bar');`, + `SELECT xml_is_well_formed_content('Alpha') AS is_well_formed_content + FROM projects + WHERE project_id = 10;`, + `SELECT xml_is_well_formed_document(xmlforest(name, department)) AS is_well_formed_document + FROM employees + WHERE id = 2;`, + `SELECT xml_is_well_formed(xmltext('Jane Doe')) AS is_well_formed + FROM employees + WHERE id = 1;`, + `SELECT xmlparse(DOCUMENT 'John');`, + `SELECT xpath_exists('/employee/name', 'John'::xml)`, + `SELECT table_to_xml('employees', TRUE, FALSE, '');`, + `SELECT query_to_xml('SELECT * FROM employees', TRUE, FALSE, '');`, + `SELECT schema_to_xml('public', TRUE, FALSE, '');`, + `SELECT database_to_xml(TRUE, TRUE, '');`, + `SELECT query_to_xmlschema('SELECT * FROM employees', TRUE, FALSE, '');`, 
+ `SELECT table_to_xmlschema('employees', TRUE, FALSE, '');`, + `SELECT xmlconcat('value1'::xml, 'value2'::xml);`, + `SELECT xmlcomment('Sample XML comment');`, + `SELECT xmlpi(name php, 'echo "hello world";');`, + `SELECT xmlroot('content', VERSION '1.0');`, + `SELECT xmlagg('content');`, + `SELECT xmlexists('//some/path' PASSING BY REF '');`, + `SELECT table_to_xml_and_xmlschema('public', 'employees', true, false, '');`, + `SELECT * FROM cursor_to_xmlschema('foo_cursor', false, true,'');`, + `SELECT * FROM cursor_to_xml('foo_cursor', 1, false, false,'');`, + `SELECT query_to_xml_and_xmlschema('SELECT * FROM employees', true, false, '');`, + `SELECT schema_to_xmlschema('public', true, false, '');`, + `SELECT schema_to_xml_and_xmlschema('public', true, false, '');`, + `SELECT database_to_xmlschema(true, false, '');`, + `SELECT database_to_xml_and_xmlschema(true, false, '');`, + `SELECT xmlconcat2('Content', 'More Content');`, + `SELECT xmlvalidate('content');`, + `SELECT xml_in('input');`, + `SELECT xml_out('output');`, + `SELECT xml_recv('');`, + `SELECT xml_send('send');`, + } + + for _, sql := range xmlFunctionSqls { + detectors := []UnsupportedConstructDetector{ + NewXmlExprDetector(sql), + NewRangeTableFuncDetector(sql), + NewFuncCallDetector(sql), + } + + parseResult, err := queryparser.Parse(sql) + assert.NoError(t, err) + + visited := make(map[protoreflect.Message]bool) + + processor := func(msg protoreflect.Message) error { + for _, detector := range detectors { + log.Debugf("running detector %T", detector) + err := detector.Detect(msg) + if err != nil { + log.Debugf("error in detector %T: %v", detector, err) + return fmt.Errorf("error in detectors %T: %w", detector, err) + } + } + return nil + } + + parseTreeMsg := queryparser.GetProtoMessageFromParseTree(parseResult) + err = queryparser.TraverseParseTree(parseTreeMsg, visited, processor) + assert.NoError(t, err) + + var allIssues []QueryIssue + for _, detector := range detectors { + allIssues = 
append(allIssues, detector.GetIssues()...) + } + + xmlIssueDetected := false + for _, issue := range allIssues { + if issue.Type == XML_FUNCTIONS { + xmlIssueDetected = true + break + } + } + + assert.True(t, xmlIssueDetected, "Expected XML Functions issue for SQL: %s", sql) + } +} + +// Combination of: FuncCallDetector, ColumnRefDetector, XmlExprDetector +func TestCombinationOfDetectors1(t *testing.T) { + combinationSqls := []string{ + `WITH LockedEmployees AS ( + SELECT *, pg_advisory_lock(xmin) AS lock_acquired + FROM employees + WHERE pg_try_advisory_lock(xmin) IS TRUE +) +SELECT xmlelement(name "EmployeeData", xmlagg( + xmlelement(name "Employee", xmlattributes(id AS "ID"), + xmlforest(name AS "Name", xmin AS "TransactionID", xmax AS "ModifiedID")))) +FROM LockedEmployees +WHERE xmax IS NOT NULL;`, + `WITH Data AS ( + SELECT id, name, xmin, xmax, + pg_try_advisory_lock(id) AS lock_status, + xmlelement(name "info", xmlforest(name as "name", xmin as "transaction_start", xmax as "transaction_end")) as xml_info + FROM projects + WHERE xmin > 100 AND xmax < 500 +) +SELECT x.id, x.xml_info +FROM Data x +WHERE x.lock_status IS TRUE;`, + `UPDATE employees +SET salary = salary * 1.1 +WHERE pg_try_advisory_xact_lock(ctid) IS TRUE AND department = 'Engineering' +RETURNING id, + xmlelement(name "UpdatedEmployee", + xmlattributes(id AS "ID"), + xmlforest(name AS "Name", salary AS "NewSalary", xmin AS "TransactionStartID", xmax AS "TransactionEndID"));`, + } + expectedIssueTypes := mapset.NewThreadUnsafeSet[string]([]string{ADVISORY_LOCKS, SYSTEM_COLUMNS, XML_FUNCTIONS}...) 
+ + for _, sql := range combinationSqls { + detectors := []UnsupportedConstructDetector{ + NewFuncCallDetector(sql), + NewColumnRefDetector(sql), + NewXmlExprDetector(sql), + } + parseResult, err := queryparser.Parse(sql) + assert.NoError(t, err) + + visited := make(map[protoreflect.Message]bool) + + processor := func(msg protoreflect.Message) error { + for _, detector := range detectors { + log.Debugf("running detector %T", detector) + err := detector.Detect(msg) + if err != nil { + log.Debugf("error in detector %T: %v", detector, err) + return fmt.Errorf("error in detectors %T: %w", detector, err) + } + } + return nil + } + + parseTreeMsg := queryparser.GetProtoMessageFromParseTree(parseResult) + err = queryparser.TraverseParseTree(parseTreeMsg, visited, processor) + assert.NoError(t, err) + + var allIssues []QueryIssue + for _, detector := range detectors { + allIssues = append(allIssues, detector.GetIssues()...) + } + issueTypesDetected := mapset.NewThreadUnsafeSet[string]() + for _, issue := range allIssues { + issueTypesDetected.Add(issue.Type) + } + + assert.True(t, expectedIssueTypes.Equal(issueTypesDetected), "Expected issue types do not match the detected issue types. Expected: %v, Actual: %v", expectedIssueTypes, issueTypesDetected) + } +} + +func TestCombinationOfDetectors1WithObjectCollector(t *testing.T) { + tests := []struct { + Sql string + ExpectedObjects []string + ExpectedSchemas []string + }{ + { + Sql: `WITH LockedEmployees AS ( + SELECT *, pg_advisory_lock(xmin) AS lock_acquired + FROM public.employees + WHERE pg_try_advisory_lock(xmin) IS TRUE + ) + SELECT xmlelement(name "EmployeeData", xmlagg( + xmlelement(name "Employee", xmlattributes(id AS "ID"), + xmlforest(name AS "Name", xmin AS "TransactionID", xmax AS "ModifiedID")))) + FROM LockedEmployees + WHERE xmax IS NOT NULL;`, + /* + Limitation: limited coverage provided by objectCollector.Collect() right now. Might not detect some cases. 
+ xmlelement, xmlforest etc are present under xml_expr node in parse tree not funccall node. + */ + ExpectedObjects: []string{"pg_advisory_lock", "public.employees", "pg_try_advisory_lock", "xmlagg", "lockedemployees"}, + ExpectedSchemas: []string{"public", ""}, + }, + { + Sql: `WITH Data AS ( + SELECT id, name, xmin, xmax, + pg_try_advisory_lock(id) AS lock_status, + xmlelement(name "info", xmlforest(name as "name", xmin as "transaction_start", xmax as "transaction_end")) as xml_info + FROM projects + WHERE xmin > 100 AND xmax < 500 + ) + SELECT x.id, x.xml_info + FROM Data x + WHERE x.lock_status IS TRUE;`, + ExpectedObjects: []string{"pg_try_advisory_lock", "projects", "data"}, + ExpectedSchemas: []string{""}, + }, + { + Sql: `UPDATE s1.employees + SET salary = salary * 1.1 + WHERE pg_try_advisory_xact_lock(ctid) IS TRUE AND department = 'Engineering' + RETURNING id, xmlelement(name "UpdatedEmployee", xmlattributes(id AS "ID"), + xmlforest(name AS "Name", salary AS "NewSalary", xmin AS "TransactionStartID", xmax AS "TransactionEndID"));`, + ExpectedObjects: []string{"s1.employees", "pg_try_advisory_xact_lock"}, + ExpectedSchemas: []string{"s1", ""}, + }, + } + + expectedIssueTypes := mapset.NewThreadUnsafeSet[string]([]string{ADVISORY_LOCKS, SYSTEM_COLUMNS, XML_FUNCTIONS}...) 
+ + for _, tc := range tests { + detectors := []UnsupportedConstructDetector{ + NewFuncCallDetector(tc.Sql), + NewColumnRefDetector(tc.Sql), + NewXmlExprDetector(tc.Sql), + } + parseResult, err := queryparser.Parse(tc.Sql) + assert.NoError(t, err) + + visited := make(map[protoreflect.Message]bool) + + objectCollector := queryparser.NewObjectCollector(nil) + processor := func(msg protoreflect.Message) error { + for _, detector := range detectors { + log.Debugf("running detector %T", detector) + err := detector.Detect(msg) + if err != nil { + log.Debugf("error in detector %T: %v", detector, err) + return fmt.Errorf("error in detectors %T: %w", detector, err) + } + } + objectCollector.Collect(msg) + return nil + } + + parseTreeMsg := queryparser.GetProtoMessageFromParseTree(parseResult) + err = queryparser.TraverseParseTree(parseTreeMsg, visited, processor) + assert.NoError(t, err) + + var allIssues []QueryIssue + for _, detector := range detectors { + allIssues = append(allIssues, detector.GetIssues()...) + } + issueTypesDetected := mapset.NewThreadUnsafeSet[string]() + for _, issue := range allIssues { + issueTypesDetected.Add(issue.Type) + } + + assert.True(t, expectedIssueTypes.Equal(issueTypesDetected), "Expected issue types do not match the detected issue types. Expected: %v, Actual: %v", expectedIssueTypes, issueTypesDetected) + + collectedObjects := objectCollector.GetObjects() + collectedSchemas := objectCollector.GetSchemaList() + + assert.ElementsMatch(t, tc.ExpectedObjects, collectedObjects, + "Objects list mismatch for sql [%s]. Expected: %v(len=%d), Actual: %v(len=%d)", tc.Sql, tc.ExpectedObjects, len(tc.ExpectedObjects), collectedObjects, len(collectedObjects)) + assert.ElementsMatch(t, tc.ExpectedSchemas, collectedSchemas, + "Schema list mismatch for sql [%s]. 
Expected: %v(len=%d), Actual: %v(len=%d)", tc.Sql, tc.ExpectedSchemas, len(tc.ExpectedSchemas), collectedSchemas, len(collectedSchemas)) + } +} + +func TestJsonbSubscriptingDetector(t *testing.T) { + withoutIssueSqls := []string{ + `SELECT numbers[1] AS first_number + FROM array_data;`, + `select ab_data['name'] from (select Data as ab_data from test_jsonb);`, // NOT REPORTED AS OF NOW because of caveat + } + issuesSqls := []string{ + `SELECT ('{"a": {"b": {"c": 1}}}'::jsonb)['a']['b']['c'];`, + `UPDATE json_data +SET data = jsonb_set(data, '{user,details,city}', '"San Francisco"') +WHERE data['user']['name'] = '"Alice"';`, + `SELECT + data->>$1 AS name, + data[$2][$3] AS second_score +FROM test_jsonb1`, + `SELECT (jsonb_build_object('name', 'PostgreSQL', 'version', 14, 'open_source', TRUE) || '{"key": "value2"}')['name'] AS json_obj;`, + `SELECT (data || '{"new_key": "new_value"}' )['name'] FROM test_jsonb;`, + `SELECT ('{"key": "value1"}'::jsonb || '{"key": "value2"}'::jsonb)['key'] AS object_in_array;`, + } + + for _, sql := range withoutIssueSqls { + issues := getDetectorIssues(t, NewJsonbSubscriptingDetector(sql, []string{}, []string{}), sql) + + assert.Equal(t, 0, len(issues), "Expected 1 issue for SQL: %s", sql) + } + + for _, sql := range issuesSqls { + issues := getDetectorIssues(t, NewJsonbSubscriptingDetector(sql, []string{"data"}, []string{"jsonb_build_object"}), sql) + + assert.Equal(t, 1, len(issues), "Expected 1 issue for SQL: %s", sql) + assert.Equal(t, JSONB_SUBSCRIPTING, issues[0].Type, "Expected System Columns issue for SQL: %s", sql) + } +} + +func TestJsonConstructorDetector(t *testing.T) { + sql := `SELECT JSON_ARRAY('PostgreSQL', 12, TRUE, NULL) AS json_array;` + + issues := getDetectorIssues(t, NewJsonConstructorFuncDetector(sql), sql) + assert.Equal(t, 1, len(issues), "Expected 1 issue for SQL: %s", sql) + assert.Equal(t, JSON_CONSTRUCTOR_FUNCTION, issues[0].Type, "Expected Advisory Locks issue for SQL: %s", sql) + +} + +func 
TestJsonQueryFunctionDetector(t *testing.T) { + sql := `SELECT id, JSON_VALUE(details, '$.title') AS title +FROM books +WHERE JSON_EXISTS(details, '$.price ? (@ > $price)' PASSING 30 AS price);` + + issues := getDetectorIssues(t, NewJsonQueryFunctionDetector(sql), sql) + assert.Equal(t, 1, len(issues), "Expected 1 issue for SQL: %s", sql) + assert.Equal(t, JSON_QUERY_FUNCTION, issues[0].Type, "Expected Advisory Locks issue for SQL: %s", sql) + +} + +func TestMergeStatementDetector(t *testing.T) { + sqls := []string{ + `MERGE INTO customer_account ca +USING recent_transactions t +ON t.customer_id = ca.customer_id +WHEN MATCHED THEN + UPDATE SET balance = balance + transaction_value +WHEN NOT MATCHED THEN + INSERT (customer_id, balance) + VALUES (t.customer_id, t.transaction_value);`, + + `MERGE INTO wines w +USING wine_stock_changes s +ON s.winename = w.winename +WHEN NOT MATCHED AND s.stock_delta > 0 THEN + INSERT VALUES(s.winename, s.stock_delta) +WHEN MATCHED AND w.stock + s.stock_delta > 0 THEN + UPDATE SET stock = w.stock + s.stock_delta +WHEN MATCHED THEN + DELETE +RETURNING merge_action(), w.*;`, + } + + for _, sql := range sqls { + issues := getDetectorIssues(t, NewMergeStatementDetector(sql), sql) + assert.Equal(t, 1, len(issues), "Expected 1 issue for SQL: %s", sql) + assert.Equal(t, MERGE_STATEMENT, issues[0].Type, "Expected Advisory Locks issue for SQL: %s", sql) + } +} +func TestIsJsonPredicate(t *testing.T) { + sql := `SELECT js, js IS JSON "json?" 
FROM (VALUES ('123'), ('"abc"'), ('{"a": "b"}'), ('[1,2]'),('abc')) foo(js);` + + issues := getDetectorIssues(t, NewJsonPredicateExprDetector(sql), sql) + assert.Equal(t, 1, len(issues), "Expected 1 issue for SQL: %s", sql) + assert.Equal(t, JSON_TYPE_PREDICATE, issues[0].Type, "Expected Advisory Locks issue for SQL: %s", sql) + +} diff --git a/yb-voyager/src/query/queryissue/helpers.go b/yb-voyager/src/query/queryissue/helpers.go new file mode 100644 index 0000000000..dfc89f1197 --- /dev/null +++ b/yb-voyager/src/query/queryissue/helpers.go @@ -0,0 +1,159 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queryissue + +import ( + mapset "github.com/deckarep/golang-set/v2" +) + +// Refer: https://www.postgresql.org/docs/current/functions-admin.html#FUNCTIONS-ADVISORY-LOCKS +var unsupportedAdvLockFuncs = mapset.NewThreadUnsafeSet([]string{ + "pg_advisory_lock", "pg_advisory_lock_shared", + "pg_advisory_unlock", "pg_advisory_unlock_all", "pg_advisory_unlock_shared", + "pg_advisory_xact_lock", "pg_advisory_xact_lock_shared", + "pg_try_advisory_lock", "pg_try_advisory_lock_shared", + "pg_try_advisory_xact_lock", "pg_try_advisory_xact_lock_shared", +}...) + +var unsupportedSysCols = mapset.NewThreadUnsafeSet([]string{ + "xmin", "xmax", "cmin", "cmax", "ctid", +}...) + +// Refer: https://www.postgresql.org/docs/17/functions-xml.html#FUNCTIONS-XML-PROCESSING +var unsupportedXmlFunctions = mapset.NewThreadUnsafeSet([]string{ + // 1. 
Producing XML content + "xmltext", "xmlcomment", "xmlconcat", "xmlelement", "xmlforest", + "xmlpi", "xmlroot", "xmlagg", + // 2. XML predicates + "xml", "xmlexists", "xml_is_well_formed", "xml_is_well_formed_document", + "xml_is_well_formed_content", + // 3. Processing XML + "xpath", "xpath_exists", "xmltable", + // 4. Mapping Table to XML + "table_to_xml", "table_to_xmlschema", "table_to_xml_and_xmlschema", + "cursor_to_xmlschema", "cursor_to_xml", + "query_to_xmlschema", "query_to_xml", "query_to_xml_and_xmlschema", + "schema_to_xml", "schema_to_xmlschema", "schema_to_xml_and_xmlschema", + "database_to_xml", "database_to_xmlschema", "database_to_xml_and_xmlschema", + + /* + 5. extras - not in ref doc but exists + SELECT proname FROM pg_proc + WHERE prorettype = 'xml'::regtype; + */ + "xmlconcat2", "xmlvalidate", "xml_in", "xml_out", "xml_recv", "xml_send", // System XML I/O +}...) + +var unsupportedRegexFunctions = mapset.NewThreadUnsafeSet([]string{ + "regexp_count", "regexp_instr", "regexp_like", +}...) + +var UnsupportedIndexMethods = []string{ + "gist", + "brin", + "spgist", +} + +// Reference for some of the types https://docs.yugabyte.com/stable/api/ysql/datatypes/ (datatypes with type 1) +var UnsupportedIndexDatatypes = []string{ + "citext", + "tsvector", + "tsquery", + "jsonb", + "inet", + "json", + "macaddr", + "macaddr8", + "cidr", + "bit", // for BIT (n) + "varbit", // for BIT varying (n) + "daterange", + "tsrange", + "tstzrange", + "numrange", + "int4range", + "int8range", + "interval", // same for INTERVAL YEAR TO MONTH and INTERVAL DAY TO SECOND + //Below ones are not supported on PG as well with atleast btree access method. 
Better to have in our list though + //Need to understand if there is other method or way available in PG to have these index key [TODO] + "circle", + "box", + "line", + "lseg", + "point", + "pg_lsn", + "path", + "polygon", + "txid_snapshot", + // array as well but no need to add it in the list as fetching this type is a different way TODO: handle better with specific types +} + +var unsupportedAggFunctions = mapset.NewThreadUnsafeSet([]string{ + //agg function added in PG16 - https://www.postgresql.org/docs/16/functions-aggregate.html#id-1.5.8.27.5.2.4.1.1.1.1 + "any_value", "range_agg", "range_intersect_agg", +}...) + +const ( + // // json functions, refer - https://www.postgresql.org/about/featurematrix/detail/395/ + JSON_OBJECTAGG = "JSON_OBJECTAGG" + JSON_ARRAY = "JSON_ARRAY" + JSON_ARRAYAGG = "JSON_ARRAYAGG" + JSON_OBJECT = "JSON_OBJECT" + //json query functions supported in PG 17, refer - https://www.postgresql.org/docs/17/functions-json.html#FUNCTIONS-SQLJSON-QUERYING + JSON_EXISTS = "JSON_EXISTS" + JSON_QUERY = "JSON_QUERY" + JSON_VALUE = "JSON_VALUE" + JSON_TABLE = "JSON_TABLE" +) + +var unsupportedLargeObjectFunctions = mapset.NewThreadUnsafeSet([]string{ + + //refer - https://www.postgresql.org/docs/current/lo-interfaces.html#LO-CREATE + "lo_create", "lo_creat", "lo_import", "lo_import_with_oid", + "lo_export", "lo_open", "lo_write", "lo_read", "lo_lseek", "lo_lseek64", + "lo_tell", "lo_tell64", "lo_truncate", "lo_truncate64", "lo_close", + "lo_unlink", + + //server side functions - https://www.postgresql.org/docs/current/lo-funcs.html + "lo_from_bytea", "lo_put", "lo_get", + + //functions provided by lo extension, refer - https://www.postgresql.org/docs/current/lo.html#LO-RATIONALE + "lo_manage", "lo_oid", +}...) 
+ +// catalog functions return type jsonb +var catalogFunctionsReturningJsonb = mapset.NewThreadUnsafeSet([]string{ + /* + SELECT + DISTINCT p.proname AS Function_Name + FROM + pg_catalog.pg_proc p + LEFT JOIN pg_catalog.pg_language l ON p.prolang = l.oid + LEFT JOIN pg_catalog.pg_namespace n ON p.pronamespace = n.oid + WHERE + pg_catalog.pg_function_is_visible(p.oid) AND pg_catalog.pg_get_function_result(p.oid) = 'jsonb' + + ORDER BY Function_Name; + */ + "jsonb_agg", "jsonb_agg_finalfn", "jsonb_agg_strict", "jsonb_array_element", + "jsonb_build_array", "jsonb_build_object", "jsonb_concat", "jsonb_delete", + "jsonb_delete_path", "jsonb_extract_path", "jsonb_in", "jsonb_insert", + "jsonb_object", "jsonb_object_agg", "jsonb_object_agg_finalfn", "jsonb_object_agg_strict", + "jsonb_object_agg_unique", "jsonb_object_agg_unique_strict", "jsonb_object_field", "jsonb_path_query_array", + "jsonb_path_query_array_tz", "jsonb_path_query_first", "jsonb_path_query_first_tz", "jsonb_recv", + "jsonb_set", "jsonb_set_lax", "jsonb_strip_nulls", "to_jsonb", "ts_headline", +}...) diff --git a/yb-voyager/src/query/queryissue/issues_ddl.go b/yb-voyager/src/query/queryissue/issues_ddl.go new file mode 100644 index 0000000000..6f3da748cd --- /dev/null +++ b/yb-voyager/src/query/queryissue/issues_ddl.go @@ -0,0 +1,543 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queryissue + +import ( + "fmt" + "strings" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" + "github.com/yugabyte/yb-voyager/yb-voyager/src/issue" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" +) + +var generatedColumnsIssue = issue.Issue{ + Type: STORED_GENERATED_COLUMNS, + Name: "Stored generated columns are not supported.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/10695", + Suggestion: "Using Triggers to update the generated columns is one way to work around this issue, refer docs link for more details.", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#generated-always-as-stored-type-column-is-not-supported", +} + +func NewGeneratedColumnsIssue(objectType string, objectName string, sqlStatement string, generatedColumns []string) QueryIssue { + issue := generatedColumnsIssue + issue.Name = issue.Name + fmt.Sprintf(" Generated Columns: (%s)", strings.Join(generatedColumns, ",")) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var unloggedTableIssue = issue.Issue{ + Type: UNLOGGED_TABLE, + Name: "UNLOGGED tables are not supported yet.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/1129/", + Suggestion: "Remove UNLOGGED keyword to make it work", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unlogged-table-is-not-supported", + MinimumVersionsFixedIn: map[string]*ybversion.YBVersion{ + ybversion.SERIES_2024_2: ybversion.V2024_2_0_0, + }, +} + +func NewUnloggedTableIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + details := map[string]interface{}{} + return newQueryIssue(unloggedTableIssue, objectType, objectName, sqlStatement, details) +} + +var unsupportedIndexMethodIssue = issue.Issue{ + Type: UNSUPPORTED_INDEX_METHOD, + Name: "Schema contains %s index which is not 
supported.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/YugaByte/yugabyte-db/issues/1337", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gist-brin-and-spgist-index-types-are-not-supported", +} + +func NewUnsupportedIndexMethodIssue(objectType string, objectName string, sqlStatement string, indexAccessMethod string) QueryIssue { + issue := unsupportedIndexMethodIssue + issue.Name = fmt.Sprintf(unsupportedIndexMethodIssue.Name, strings.ToUpper(indexAccessMethod)) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var storageParameterIssue = issue.Issue{ + Type: STORAGE_PARAMETER, + Name: "Storage parameters are not supported yet.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/23467", + Suggestion: "Remove the storage parameters from the DDL", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#storage-parameters-on-indexes-or-constraints-in-the-source-postgresql", +} + +func NewStorageParameterIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + details := map[string]interface{}{} + return newQueryIssue(storageParameterIssue, objectType, objectName, sqlStatement, details) +} + +var setColumnAttributeIssue = issue.Issue{ + Type: ALTER_TABLE_SET_COLUMN_ATTRIBUTE, + Name: "ALTER TABLE .. ALTER COLUMN .. 
SET ( attribute = value ) not supported yet", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/1124", + Suggestion: "Remove it from the exported schema", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", +} + +func NewSetColumnAttributeIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + details := map[string]interface{}{} + return newQueryIssue(setColumnAttributeIssue, objectType, objectName, sqlStatement, details) +} + +var alterTableClusterOnIssue = issue.Issue{ + Type: ALTER_TABLE_CLUSTER_ON, + Name: "ALTER TABLE CLUSTER not supported yet.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/YugaByte/yugabyte-db/issues/1124", + Suggestion: "Remove it from the exported schema.", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", +} + +func NewClusterONIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + details := map[string]interface{}{} + return newQueryIssue(alterTableClusterOnIssue, objectType, objectName, sqlStatement, details) +} + +var alterTableDisableRuleIssue = issue.Issue{ + Type: ALTER_TABLE_DISABLE_RULE, + Name: "ALTER TABLE name DISABLE RULE not supported yet", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/1124", + Suggestion: "Remove this and the rule '%s' from the exported schema to be not enabled on the table.", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-alter-table-ddl-variants-in-source-schema", +} + +func NewAlterTableDisableRuleIssue(objectType string, objectName string, sqlStatement string, ruleName string) QueryIssue { + details := map[string]interface{}{} + issue := alterTableDisableRuleIssue + issue.Suggestion = fmt.Sprintf(issue.Suggestion, 
ruleName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, details) +} + +var exclusionConstraintIssue = issue.Issue{ + Type: EXCLUSION_CONSTRAINTS, + Name: "Exclusion constraint is not supported yet", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/3944", + Suggestion: "Refer docs link for details on possible workaround", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#exclusion-constraints-is-not-supported", +} + +func NewExclusionConstraintIssue(objectType string, objectName string, sqlStatement string, constraintName string) QueryIssue { + details := map[string]interface{}{ + CONSTRAINT_NAME: constraintName, + } + return newQueryIssue(exclusionConstraintIssue, objectType, objectName, sqlStatement, details) +} + +var deferrableConstraintIssue = issue.Issue{ + Type: DEFERRABLE_CONSTRAINTS, + Name: "DEFERRABLE constraints not supported yet", + Impact: constants.IMPACT_LEVEL_3, + GH: "https://github.com/yugabyte/yugabyte-db/issues/1709", + Suggestion: "Remove these constraints from the exported schema and make the neccessary changes to the application to work on target seamlessly", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#deferrable-constraint-on-constraints-other-than-foreign-keys-is-not-supported", +} + +func NewDeferrableConstraintIssue(objectType string, objectName string, sqlStatement string, constraintName string) QueryIssue { + details := map[string]interface{}{ + CONSTRAINT_NAME: constraintName, + } + return newQueryIssue(deferrableConstraintIssue, objectType, objectName, sqlStatement, details) +} + +var multiColumnGinIndexIssue = issue.Issue{ + Type: MULTI_COLUMN_GIN_INDEX, + Name: "Schema contains gin index on multi column which is not supported.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/10652", + DocsLink: 
"https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#gin-indexes-on-multiple-columns-are-not-supported", +} + +func NewMultiColumnGinIndexIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(multiColumnGinIndexIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var orderedGinIndexIssue = issue.Issue{ + Type: ORDERED_GIN_INDEX, + Name: "Schema contains gin index on column with ASC/DESC/HASH Clause which is not supported.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/10653", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#issue-in-some-unsupported-cases-of-gin-indexes", +} + +func NewOrderedGinIndexIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(orderedGinIndexIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var policyRoleIssue = issue.Issue{ + Type: POLICY_WITH_ROLES, + Name: "Policy require roles to be created.", + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "Users/Grants are not migrated during the schema migration. 
Create the Users manually to make the policies work", + GH: "https://github.com/yugabyte/yb-voyager/issues/1655", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#policies-on-users-in-source-require-manual-user-creation", +} + +func NewPolicyRoleIssue(objectType string, objectName string, sqlStatement string, roles []string) QueryIssue { + issue := policyRoleIssue + issue.Name = fmt.Sprintf("%s Users - (%s)", issue.Name, strings.Join(roles, ",")) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var constraintTriggerIssue = issue.Issue{ + Type: CONSTRAINT_TRIGGER, + Name: "CONSTRAINT TRIGGER not supported yet.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/YugaByte/yugabyte-db/issues/1709", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#constraint-trigger-is-not-supported", +} + +func NewConstraintTriggerIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + details := map[string]interface{}{} + return newQueryIssue(constraintTriggerIssue, objectType, objectName, sqlStatement, details) +} + +var referencingClauseInTriggerIssue = issue.Issue{ + Type: REFERENCING_CLAUSE_IN_TRIGGER, + Name: "REFERENCING clause (transition tables) not supported yet.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/YugaByte/yugabyte-db/issues/1668", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#referencing-clause-for-triggers", +} + +func NewReferencingClauseTrigIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(referencingClauseInTriggerIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var beforeRowTriggerOnPartitionTableIssue = issue.Issue{ + Type: BEFORE_ROW_TRIGGER_ON_PARTITIONED_TABLE, + Name: "Partitioned tables cannot have BEFORE / FOR EACH ROW triggers.", + Impact: 
constants.IMPACT_LEVEL_1, + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#before-row-triggers-on-partitioned-tables", + GH: "https://github.com/yugabyte/yugabyte-db/issues/24830", + Suggestion: "Create the triggers on individual partitions.", +} + +func NewBeforeRowOnPartitionTableIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(beforeRowTriggerOnPartitionTableIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var alterTableAddPKOnPartitionIssue = issue.Issue{ + Type: ALTER_TABLE_ADD_PK_ON_PARTITIONED_TABLE, + Name: "Adding primary key to a partitioned table is not supported yet.", + Impact: constants.IMPACT_LEVEL_1, + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#adding-primary-key-to-a-partitioned-table-results-in-an-error", + GH: "https://github.com/yugabyte/yugabyte-db/issues/10074", + MinimumVersionsFixedIn: map[string]*ybversion.YBVersion{ + ybversion.SERIES_2024_1: ybversion.V2024_1_0_0, + ybversion.SERIES_2024_2: ybversion.V2024_2_0_0, + ybversion.SERIES_2_23: ybversion.V2_23_0_0, + }, +} + +func NewAlterTableAddPKOnPartiionIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + details := map[string]interface{}{} + return newQueryIssue(alterTableAddPKOnPartitionIssue, objectType, objectName, sqlStatement, details) +} + +var expressionPartitionIssue = issue.Issue{ + Type: EXPRESSION_PARTITION_WITH_PK_UK, + Name: "Issue with Partition using Expression on a table which cannot contain Primary Key / Unique Key on any column", + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "Remove the Constraint from the table definition", + GH: "https://github.com/yugabyte/yb-voyager/issues/698", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/mysql/#tables-partitioned-with-expressions-cannot-contain-primary-unique-keys", +} + +func 
NewExpressionPartitionIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(expressionPartitionIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var multiColumnListPartition = issue.Issue{ + Type: MULTI_COLUMN_LIST_PARTITION, + Name: `cannot use "list" partition strategy with more than one column`, + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "Make it a single column partition by list or choose other supported Partitioning methods", + GH: "https://github.com/yugabyte/yb-voyager/issues/699", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/mysql/#multi-column-partition-by-list-is-not-supported", +} + +func NewMultiColumnListPartition(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(multiColumnListPartition, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var insufficientColumnsInPKForPartition = issue.Issue{ + Type: INSUFFICIENT_COLUMNS_IN_PK_FOR_PARTITION, + Name: "insufficient columns in the PRIMARY KEY constraint definition in CREATE TABLE", + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "Add all Partition columns to Primary Key", + GH: "https://github.com/yugabyte/yb-voyager/issues/578", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/oracle/#partition-key-column-not-part-of-primary-key-columns", +} + +func NewInsufficientColumnInPKForPartition(objectType string, objectName string, sqlStatement string, partitionColumnsNotInPK []string) QueryIssue { + issue := insufficientColumnsInPKForPartition + issue.Name = fmt.Sprintf("%s - (%s)", issue.Name, strings.Join(partitionColumnsNotInPK, ", ")) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var xmlDatatypeIssue = issue.Issue{ + Type: XML_DATATYPE, + Name: "Unsupported datatype - xml", + Impact: constants.IMPACT_LEVEL_3, + Suggestion: "Data ingestion is not 
supported for this type in YugabyteDB so handle this type in different way. Refer link for more details.", + GH: "https://github.com/yugabyte/yugabyte-db/issues/1043", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#data-ingestion-on-xml-data-type-is-not-supported", +} + +func NewXMLDatatypeIssue(objectType string, objectName string, sqlStatement string, colName string) QueryIssue { + issue := xmlDatatypeIssue + issue.Name = fmt.Sprintf("%s on column - %s", issue.Name, colName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var xidDatatypeIssue = issue.Issue{ + Type: XID_DATATYPE, + Name: "Unsupported datatype - xid", + Impact: constants.IMPACT_LEVEL_3, + Suggestion: "Functions for this type e.g. txid_current are not supported in YugabyteDB yet", + GH: "https://github.com/yugabyte/yugabyte-db/issues/15638", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xid-functions-is-not-supported", +} + +func NewXIDDatatypeIssue(objectType string, objectName string, sqlStatement string, colName string) QueryIssue { + issue := xidDatatypeIssue + issue.Name = fmt.Sprintf("%s on column - %s", issue.Name, colName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var postgisDatatypeIssue = issue.Issue{ + Type: POSTGIS_DATATYPES, + Name: "Unsupported datatype", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/11323", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb", +} + +func NewPostGisDatatypeIssue(objectType string, objectName string, sqlStatement string, typeName string, colName string) QueryIssue { + issue := postgisDatatypeIssue + issue.Name = fmt.Sprintf("%s - %s on column - %s", issue.Name, typeName, colName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, 
map[string]interface{}{}) +} + +var unsupportedDatatypesIssue = issue.Issue{ + Type: UNSUPPORTED_DATATYPES, + Name: "Unsupported datatype", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yb-voyager/issues/1731", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-yugabytedb", +} + +func NewUnsupportedDatatypesIssue(objectType string, objectName string, sqlStatement string, typeName string, colName string) QueryIssue { + issue := unsupportedDatatypesIssue + issue.Name = fmt.Sprintf("%s - %s on column - %s", issue.Name, typeName, colName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var unsupportedDatatypesForLiveMigrationIssue = issue.Issue{ + Type: UNSUPPORTED_DATATYPES_LIVE_MIGRATION, + Name: "Unsupported datatype for Live migration", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yb-voyager/issues/1731", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", +} + +func NewUnsupportedDatatypesForLMIssue(objectType string, objectName string, sqlStatement string, typeName string, colName string) QueryIssue { + issue := unsupportedDatatypesForLiveMigrationIssue + issue.Name = fmt.Sprintf("%s - %s on column - %s", issue.Name, typeName, colName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var unsupportedDatatypesForLiveMigrationWithFFOrFBIssue = issue.Issue{ + Type: UNSUPPORTED_DATATYPES_LIVE_MIGRATION_WITH_FF_FB, + Name: "Unsupported datatype for Live migration with fall-forward/fallback", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yb-voyager/issues/1731", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#unsupported-datatypes-by-voyager-during-live-migration", +} + +func 
NewUnsupportedDatatypesForLMWithFFOrFBIssue(objectType string, objectName string, sqlStatement string, typeName string, colName string) QueryIssue { + issue := unsupportedDatatypesForLiveMigrationWithFFOrFBIssue + issue.Name = fmt.Sprintf("%s - %s on column - %s", issue.Name, typeName, colName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var primaryOrUniqueOnUnsupportedIndexTypesIssue = issue.Issue{ + Type: PK_UK_ON_COMPLEX_DATATYPE, + Name: "Primary key and Unique constraint on column '%s' not yet supported", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/25003", + Suggestion: "Refer to the docs link for the workaround", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", //Keeping it similar for now, will see if we need to a separate issue on docs, +} + +func NewPrimaryOrUniqueConsOnUnsupportedIndexTypesIssue(objectType string, objectName string, sqlStatement string, typeName string, constraintName string) QueryIssue { + details := map[string]interface{}{ + CONSTRAINT_NAME: constraintName, + } + issue := primaryOrUniqueOnUnsupportedIndexTypesIssue + issue.Name = fmt.Sprintf(issue.Name, typeName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, details) +} + +var indexOnComplexDatatypesIssue = issue.Issue{ + Type: INDEX_ON_COMPLEX_DATATYPE, + Name: "INDEX on column '%s' not yet supported", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yugabyte-db/issues/25003", + Suggestion: "Refer to the docs link for the workaround", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#indexes-on-some-complex-data-types-are-not-supported", +} + +func NewIndexOnComplexDatatypesIssue(objectType string, objectName string, sqlStatement string, typeName string) QueryIssue { + issue := indexOnComplexDatatypesIssue + 
issue.Name = fmt.Sprintf(issue.Name, typeName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var foreignTableIssue = issue.Issue{ + Type: FOREIGN_TABLE, + Name: "Foreign tables require manual intervention.", + Impact: constants.IMPACT_LEVEL_1, + GH: "https://github.com/yugabyte/yb-voyager/issues/1627", + Suggestion: "SERVER '%s', and USER MAPPING should be created manually on the target to create and use the foreign table", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#foreign-table-in-the-source-database-requires-server-and-user-mapping", +} + +func NewForeignTableIssue(objectType string, objectName string, sqlStatement string, serverName string) QueryIssue { + issue := foreignTableIssue + issue.Suggestion = fmt.Sprintf(issue.Suggestion, serverName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var inheritanceIssue = issue.Issue{ + Type: INHERITANCE, + Name: "TABLE INHERITANCE not supported in YugabyteDB", + Impact: constants.IMPACT_LEVEL_3, + GH: "https://github.com/YugaByte/yugabyte-db/issues/1129", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#table-inheritance-is-not-supported", +} + +func NewInheritanceIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(inheritanceIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var percentTypeSyntax = issue.Issue{ + Type: REFERENCED_TYPE_DECLARATION, + Name: "Referenced type declaration of variables", + Impact: constants.IMPACT_LEVEL_1, + Description: "", + Suggestion: "Fix the syntax to include the actual type name instead of referencing the type of a column", + GH: "https://github.com/yugabyte/yugabyte-db/issues/23619", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#type-syntax-is-not-supported", +} + +func 
NewPercentTypeSyntaxIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(percentTypeSyntax, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var loDatatypeIssue = issue.Issue{ + Type: LARGE_OBJECT_DATATYPE, + Name: "Unsupported datatype - lo", + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "Large objects are not yet supported in YugabyteDB, no workaround available currently", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25318", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#large-objects-and-its-functions-are-currently-not-supported", // TODO +} + +func NewLODatatypeIssue(objectType string, objectName string, SqlStatement string, colName string) QueryIssue { + issue := loDatatypeIssue + issue.Name = fmt.Sprintf("%s on column - %s", issue.Name, colName) + return newQueryIssue(issue, objectType, objectName, SqlStatement, map[string]interface{}{}) +} + +var multiRangeDatatypeIssue = issue.Issue{ + Type: MULTI_RANGE_DATATYPE, + Name: "Unsupported datatype", + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "Multirange data type is not yet supported in YugabyteDB, no workaround available currently", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewMultiRangeDatatypeIssue(objectType string, objectName string, sqlStatement string, typeName string, colName string) QueryIssue { + issue := multiRangeDatatypeIssue + issue.Name = fmt.Sprintf("%s - %s on column - %s", issue.Name, typeName, colName) + return newQueryIssue(issue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var securityInvokerViewIssue = issue.Issue{ + Type: SECURITY_INVOKER_VIEWS, + Name: "Security Invoker Views not supported yet", + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "Security Invoker Views are not yet 
supported in YugabyteDB, no workaround available currently", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewSecurityInvokerViewIssue(objectType string, objectName string, SqlStatement string) QueryIssue { + return newQueryIssue(securityInvokerViewIssue, objectType, objectName, SqlStatement, map[string]interface{}{}) +} + +var deterministicOptionCollationIssue = issue.Issue{ + Type: DETERMINISTIC_OPTION_WITH_COLLATION, + Name: DETERMINISTIC_OPTION_WITH_COLLATION_NAME, + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "This feature is not supported in YugabyteDB yet", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewDeterministicOptionCollationIssue(objectType string, objectName string, SqlStatement string) QueryIssue { + return newQueryIssue(deterministicOptionCollationIssue, objectType, objectName, SqlStatement, map[string]interface{}{}) +} + +var foreignKeyReferencesPartitionedTableIssue = issue.Issue{ + Type: FOREIGN_KEY_REFERENCES_PARTITIONED_TABLE, + Name: FOREIGN_KEY_REFERENCES_PARTITIONED_TABLE_NAME, + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "No workaround available ", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewForeignKeyReferencesPartitionedTableIssue(objectType string, objectName string, SqlStatement string, constraintName string) QueryIssue { + details := map[string]interface{}{ + CONSTRAINT_NAME: constraintName, + } + return newQueryIssue(foreignKeyReferencesPartitionedTableIssue, objectType, objectName, SqlStatement, details) +} + +var uniqueNullsNotDistinctIssue = issue.Issue{ + Type: 
UNIQUE_NULLS_NOT_DISTINCT, + Name: UNIQUE_NULLS_NOT_DISTINCT_NAME, + Impact: constants.IMPACT_LEVEL_1, + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewUniqueNullsNotDistinctIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(uniqueNullsNotDistinctIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} diff --git a/yb-voyager/src/query/queryissue/issues_ddl_test.go b/yb-voyager/src/query/queryissue/issues_ddl_test.go new file mode 100644 index 0000000000..01f90aa0fd --- /dev/null +++ b/yb-voyager/src/query/queryissue/issues_ddl_test.go @@ -0,0 +1,386 @@ +//go:build issues_integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package queryissue + +import ( + "context" + "fmt" + "os" + "strings" + "testing" + + "github.com/jackc/pgx/v5" + "github.com/stretchr/testify/assert" + "github.com/testcontainers/testcontainers-go/modules/yugabytedb" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/issue" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +var ( + testYugabytedbContainer *yugabytedb.Container + testYugabytedbConnStr string + testYbVersion *ybversion.YBVersion +) + +func getConn() (*pgx.Conn, error) { + ctx := context.Background() + var connStr string + var err error + if testYugabytedbConnStr != "" { + connStr = testYugabytedbConnStr + } else { + connStr, err = testYugabytedbContainer.YSQLConnectionString(ctx, "sslmode=disable") + if err != nil { + return nil, err + } + } + + conn, err := pgx.Connect(ctx, connStr) + if err != nil { + return nil, err + } + + return conn, nil +} + +func assertErrorCorrectlyThrownForIssueForYBVersion(t *testing.T, execErr error, expectedError string, issue issue.Issue) { + isFixed, err := issue.IsFixedIn(testYbVersion) + testutils.FatalIfError(t, err) + + if isFixed { + assert.NoError(t, execErr) + } else { + assert.ErrorContains(t, execErr, expectedError) + } +} + +func testXMLFunctionIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, "SELECT xmlconcat('', 'foo')") + assert.ErrorContains(t, err, "unsupported XML feature") +} + +func testStoredGeneratedFunctionsIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE TABLE rectangles ( + id SERIAL PRIMARY KEY, + length NUMERIC NOT NULL, + width NUMERIC NOT NULL, + area NUMERIC GENERATED ALWAYS AS (length * width) STORED + )`) + assert.ErrorContains(t, err, "syntax error") 
+} + +func testUnloggedTableIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, "CREATE UNLOGGED TABLE unlogged_table (a int)") + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "UNLOGGED database object not supported yet", unloggedTableIssue) +} + +func testAlterTableAddPKOnPartitionIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE TABLE orders2 ( + order_id bigint NOT NULL, + order_date timestamp + ) PARTITION BY RANGE (order_date); + ALTER TABLE orders2 ADD PRIMARY KEY (order_id,order_date)`) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "changing primary key of a partitioned table is not yet implemented", alterTableAddPKOnPartitionIssue) +} + +func testSetAttributeIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE TABLE public.event_search ( + event_id text, + room_id text, + sender text, + key text, + vector tsvector, + origin_server_ts bigint, + stream_ordering bigint + ); + ALTER TABLE ONLY public.event_search ALTER COLUMN room_id SET (n_distinct=-0.01)`) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "ALTER TABLE ALTER column not supported yet", setColumnAttributeIssue) +} + +func testClusterOnIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE TABLE test(ID INT PRIMARY KEY NOT NULL, + Name TEXT NOT NULL, + Age INT NOT NULL, + Address CHAR(50), + Salary REAL); + + CREATE UNIQUE INDEX test_age_salary ON public.test USING btree (age ASC NULLS LAST, salary ASC NULLS LAST); + + ALTER TABLE public.test CLUSTER ON 
test_age_salary`) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "ALTER TABLE CLUSTER not supported yet", alterTableClusterOnIssue) +} + +func testDisableRuleIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + create table trule (a int); + + create rule trule_rule as on update to trule do instead nothing; + + ALTER TABLE trule DISABLE RULE trule_rule`) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "ALTER TABLE DISABLE RULE not supported yet", alterTableDisableRuleIssue) +} + +func testStorageParameterIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE TABLE public.example ( + name text, + email text, + new_id integer NOT NULL, + id2 integer NOT NULL, + CONSTRAINT example_name_check CHECK ((char_length(name) > 3)) + ); + + ALTER TABLE ONLY public.example + ADD CONSTRAINT example_email_key UNIQUE (email) WITH (fillfactor = 70);`) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "unrecognized parameter", storageParameterIssue) +} + +func testLoDatatypeIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE TABLE image (title text, raster lo);`) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "does not exist", loDatatypeIssue) +} + +func testMultiRangeDatatypeIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + queries := []string{ + `CREATE TABLE int_multirange_table ( + id SERIAL PRIMARY KEY, + value_ranges int4multirange + );`, + `CREATE TABLE bigint_multirange_table ( + id SERIAL PRIMARY KEY, + value_ranges int8multirange + );`, + `CREATE TABLE numeric_multirange_table ( + id SERIAL PRIMARY KEY, 
+ price_ranges nummultirange + );`, + `CREATE TABLE timestamp_multirange_table ( + id SERIAL PRIMARY KEY, + event_times tsmultirange + );`, + `CREATE TABLE timestamptz_multirange_table ( + id SERIAL PRIMARY KEY, + global_event_times tstzmultirange + );`, + `CREATE TABLE date_multirange_table ( + id SERIAL PRIMARY KEY, + project_dates datemultirange + );`, + } + + for _, query := range queries { + _, err = conn.Exec(ctx, query) + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "does not exist", multiRangeDatatypeIssue) + } +} + +func testSecurityInvokerView(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE TABLE public.employees ( + employee_id SERIAL PRIMARY KEY, + first_name VARCHAR(100), + last_name VARCHAR(100), + department VARCHAR(50) + ); + + CREATE VIEW public.view_explicit_security_invoker + WITH (security_invoker = true) AS + SELECT employee_id, first_name + FROM public.employees;`) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "unrecognized parameter", securityInvokerViewIssue) +} + +func testDeterministicCollationIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE COLLATION case_insensitive (provider = icu, locale = 'und-u-ks-level2', deterministic = false);`) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, `collation attribute "deterministic" not recognized`, deterministicOptionCollationIssue) +} + +func testForeignKeyReferencesPartitionedTableIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE TABLE abc1(id int PRIMARY KEY, val text) PARTITION BY RANGE (id); + CREATE TABLE abc_fk(id int PRIMARY KEY, abc_id INT REFERENCES abc1(id), val text) ;`) + + 
assertErrorCorrectlyThrownForIssueForYBVersion(t, err, `cannot reference partitioned table "abc1"`, foreignKeyReferencesPartitionedTableIssue) +} + +func testUniqueNullsNotDistinctIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, ` + CREATE TABLE public.products ( + id INTEGER PRIMARY KEY, + product_name VARCHAR(100), + serial_number TEXT, + UNIQUE NULLS NOT DISTINCT (product_name, serial_number) + );`) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "syntax error", uniqueNullsNotDistinctIssue) +} + +func TestDDLIssuesInYBVersion(t *testing.T) { + var err error + ybVersion := os.Getenv("YB_VERSION") + if ybVersion == "" { + panic("YB_VERSION env variable is not set. Set YB_VERSION=2024.1.3.0-b105 for example") + } + + ybVersionWithoutBuild := strings.Split(ybVersion, "-")[0] + testYbVersion, err = ybversion.NewYBVersion(ybVersionWithoutBuild) + testutils.FatalIfError(t, err) + + testYugabytedbConnStr = os.Getenv("YB_CONN_STR") + if testYugabytedbConnStr == "" { + // spawn yugabytedb container + var err error + ctx := context.Background() + testYugabytedbContainer, err = yugabytedb.Run( + ctx, + "yugabytedb/yugabyte:"+ybVersion, + ) + assert.NoError(t, err) + defer testYugabytedbContainer.Terminate(context.Background()) + } + + // run tests + var success bool + success = t.Run(fmt.Sprintf("%s-%s", "xml functions", ybVersion), testXMLFunctionIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "stored generated functions", ybVersion), testStoredGeneratedFunctionsIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "unlogged table", ybVersion), testUnloggedTableIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "alter table add PK on partition", ybVersion), testAlterTableAddPKOnPartitionIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "set 
attribute", ybVersion), testSetAttributeIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "cluster on", ybVersion), testClusterOnIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "disable rule", ybVersion), testDisableRuleIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "storage parameter", ybVersion), testStorageParameterIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "lo datatype", ybVersion), testLoDatatypeIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "multi range datatype", ybVersion), testMultiRangeDatatypeIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "security invoker view", ybVersion), testSecurityInvokerView) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "deterministic attribute in collation", ybVersion), testDeterministicCollationIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "foreign key referenced partitioned table", ybVersion), testForeignKeyReferencesPartitionedTableIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "unique nulls not distinct", ybVersion), testUniqueNullsNotDistinctIssue) + assert.True(t, success) +} diff --git a/yb-voyager/src/query/queryissue/issues_dml.go b/yb-voyager/src/query/queryissue/issues_dml.go new file mode 100644 index 0000000000..8bc2c356cc --- /dev/null +++ b/yb-voyager/src/query/queryissue/issues_dml.go @@ -0,0 +1,237 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queryissue + +import ( + "sort" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" + "github.com/yugabyte/yb-voyager/yb-voyager/src/issue" +) + +var advisoryLocksIssue = issue.Issue{ + Type: ADVISORY_LOCKS, + Name: "Advisory Locks", + Impact: constants.IMPACT_LEVEL_2, + Description: "", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/3642", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#advisory-locks-is-not-yet-implemented", +} + +func NewAdvisoryLocksIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(advisoryLocksIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var systemColumnsIssue = issue.Issue{ + Type: SYSTEM_COLUMNS, + Name: "System Columns", + Impact: constants.IMPACT_LEVEL_2, + Description: "", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/24843", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#system-columns-is-not-yet-supported", +} + +func NewSystemColumnsIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(systemColumnsIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var xmlFunctionsIssue = issue.Issue{ + Type: XML_FUNCTIONS, + Name: "XML Functions", + Impact: constants.IMPACT_LEVEL_2, + Description: "", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/1043", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#xml-functions-is-not-yet-supported", +} + +func NewXmlFunctionsIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(xmlFunctionsIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var regexFunctionsIssue = 
issue.Issue{ + Type: REGEX_FUNCTIONS, + Name: "Regex Functions", + Impact: constants.IMPACT_LEVEL_2, + Description: "", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewRegexFunctionsIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(regexFunctionsIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var aggregateFunctionIssue = issue.Issue{ + Type: AGGREGATE_FUNCTION, + Name: AGGREGATION_FUNCTIONS_NAME, + Impact: constants.IMPACT_LEVEL_2, + Description: "any_value, range_agg and range_intersect_agg functions not supported yet in YugabyteDB", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewAggregationFunctionIssue(objectType string, objectName string, sqlStatement string, funcNames []string) QueryIssue { + sort.Strings(funcNames) + details := map[string]interface{}{ + FUNCTION_NAMES: funcNames, //TODO USE it later when we start putting these in reports + } + return newQueryIssue(aggregateFunctionIssue, objectType, objectName, sqlStatement, details) +} + +var jsonConstructorFunctionsIssue = issue.Issue{ + Type: JSON_CONSTRUCTOR_FUNCTION, + Name: JSON_CONSTRUCTOR_FUNCTION_NAME, + Impact: constants.IMPACT_LEVEL_2, + Description: "Postgresql 17 features not supported yet in YugabyteDB", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewJsonConstructorFunctionIssue(objectType string, objectName string, sqlStatement string, funcNames []string) QueryIssue { + sort.Strings(funcNames) + details := 
map[string]interface{}{ + FUNCTION_NAMES: funcNames, //TODO USE it later when we start putting these in reports + } + return newQueryIssue(jsonConstructorFunctionsIssue, objectType, objectName, sqlStatement, details) +} + +var jsonQueryFunctionIssue = issue.Issue{ + Type: JSON_QUERY_FUNCTION, + Name: JSON_QUERY_FUNCTIONS_NAME, + Impact: constants.IMPACT_LEVEL_2, + Description: "Postgresql 17 features not supported yet in YugabyteDB", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewJsonQueryFunctionIssue(objectType string, objectName string, sqlStatement string, funcNames []string) QueryIssue { + sort.Strings(funcNames) + details := map[string]interface{}{ + FUNCTION_NAMES: funcNames, //TODO USE it later when we start putting these in reports + } + return newQueryIssue(jsonQueryFunctionIssue, objectType, objectName, sqlStatement, details) +} + +var loFunctionsIssue = issue.Issue{ + Type: LARGE_OBJECT_FUNCTIONS, + Name: LARGE_OBJECT_FUNCTIONS_NAME, + Impact: constants.IMPACT_LEVEL_2, + Description: "Large Objects functions are not supported in YugabyteDB", + Suggestion: "Large objects functions are not yet supported in YugabyteDB, no workaround available right now", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25318", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#large-objects-and-its-functions-are-currently-not-supported", +} + +func NewLOFuntionsIssue(objectType string, objectName string, sqlStatement string, funcNames []string) QueryIssue { + sort.Strings(funcNames) + details := map[string]interface{}{ + FUNCTION_NAMES: funcNames, //TODO USE it later when we start putting these in reports + } + return newQueryIssue(loFunctionsIssue, objectType, objectName, sqlStatement, details) +} + +var jsonbSubscriptingIssue = issue.Issue{ + Type: 
JSONB_SUBSCRIPTING, + Name: JSONB_SUBSCRIPTING_NAME, + Impact: constants.IMPACT_LEVEL_2, + Description: "Jsonb subscripting is not supported in YugabyteDB yet", + Suggestion: "Use Arrow operators (-> / ->>) to access the jsonb fields.", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#jsonb-subscripting", +} + +func NewJsonbSubscriptingIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(jsonbSubscriptingIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var jsonPredicateIssue = issue.Issue{ + Type: JSON_TYPE_PREDICATE, + Name: JSON_TYPE_PREDICATE_NAME, + Impact: constants.IMPACT_LEVEL_2, + Description: "IS JSON predicate expressions not supported yet in YugabyteDB", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewJsonPredicateIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(jsonPredicateIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var copyFromWhereIssue = issue.Issue{ + Type: COPY_FROM_WHERE, + Name: "COPY FROM ... WHERE", + Impact: constants.IMPACT_LEVEL_2, + Description: "", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewCopyFromWhereIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(copyFromWhereIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var copyOnErrorIssue = issue.Issue{ + Type: COPY_ON_ERROR, + Name: "COPY ... 
ON_ERROR", + Impact: constants.IMPACT_LEVEL_2, + Description: "", + Suggestion: "", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewCopyOnErrorIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(copyOnErrorIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var fetchWithTiesIssue = issue.Issue{ + Type: FETCH_WITH_TIES, + Name: "FETCH .. WITH TIES", + Impact: constants.IMPACT_LEVEL_2, + Description: "FETCH .. WITH TIES is not supported in YugabyteDB", + Suggestion: "No workaround available right now", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25575", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#postgresql-12-and-later-features", +} + +func NewFetchWithTiesIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + return newQueryIssue(fetchWithTiesIssue, objectType, objectName, sqlStatement, map[string]interface{}{}) +} + +var mergeStatementIssue = issue.Issue{ + Type: MERGE_STATEMENT, + Name: "Merge Statement", + Impact: constants.IMPACT_LEVEL_2, + Description: "This statement is not supported in YugabyteDB yet", + Suggestion: "Use PL/pgSQL to write the logic to get this functionality", + GH: "https://github.com/yugabyte/yugabyte-db/issues/25574", + DocsLink: "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/postgresql/#merge-command", +} + +func NewMergeStatementIssue(objectType string, objectName string, sqlStatement string) QueryIssue { + //MERGE STATEMENT is PG15 feature but MERGE .... RETURNING clause is PG17 feature so need to report it separately later. 
+	return newQueryIssue(mergeStatementIssue, objectType, objectName, sqlStatement, map[string]interface{}{})
+}
diff --git a/yb-voyager/src/query/queryissue/issues_dml_test.go b/yb-voyager/src/query/queryissue/issues_dml_test.go
new file mode 100644
index 0000000000..c18352fc75
--- /dev/null
+++ b/yb-voyager/src/query/queryissue/issues_dml_test.go
@@ -0,0 +1,326 @@
+//go:build issues_integration
+
+/*
+Copyright (c) YugabyteDB, Inc.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package queryissue
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/testcontainers/testcontainers-go/modules/yugabytedb"
+
+	"github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion"
+	testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils"
+)
+
+// testLOFunctionsIssue verifies that large-object functions (lo extension's lo_create)
+// still fail on the target YB version with the expected error, confirming the
+// loFunctionsIssue is still applicable for that version.
+func testLOFunctionsIssue(t *testing.T) {
+	ctx := context.Background()
+	conn, err := getConn()
+	assert.NoError(t, err)
+
+	defer conn.Close(context.Background())
+	_, err = conn.Exec(ctx, `
+	CREATE EXTENSION lo;
+	SELECT lo_create('2342');`)
+
+	// FIX(review): this DML test exercises lo_create(), so it should assert against
+	// loFunctionsIssue, not the DDL-side loDatatypeIssue — confirm both issues share
+	// the same fixed-in versions if the original constant was intentional.
+	assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "Transaction for catalog table write operation 'pg_largeobject_metadata' not found", loFunctionsIssue)
+}
+
+// testJsonbSubscriptingIssue verifies jsonb subscripting (col['k']) is still
+// unsupported on the target YB version.
+func testJsonbSubscriptingIssue(t *testing.T) {
+	ctx := context.Background()
+	conn, err := getConn()
+	assert.NoError(t, err)
+
+	defer conn.Close(context.Background())
+	_, err = conn.Exec(ctx, `SELECT ('{"a": {"b": {"c": 1}}}'::jsonb)['a']['b']['c'];`)
+
+	
assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "cannot subscript type jsonb because it is not an array", jsonbSubscriptingIssue) // FIX: was loDatatypeIssue (copy-paste); this test exercises jsonb subscripting
+}
+
+// testRegexFunctionsIssue verifies the PG15 regexp_count/instr/like functions are
+// still absent on the target YB version.
+func testRegexFunctionsIssue(t *testing.T) {
+	ctx := context.Background()
+	conn, err := getConn()
+	assert.NoError(t, err)
+
+	defer conn.Close(context.Background())
+	stmts := []string{
+		`SELECT regexp_count('This is an example. Another example. Example is a common word.', 'example')`,
+		`SELECT regexp_instr('This is an example. Another example. Example is a common word.', 'example')`,
+		`SELECT regexp_like('This is an example. Another example. Example is a common word.', 'example')`,
+	}
+
+	for _, stmt := range stmts {
+		_, err = conn.Exec(ctx, stmt)
+		assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "does not exist", regexFunctionsIssue)
+	}
+}
+
+// testFetchWithTiesIssue verifies FETCH ... WITH TIES is still rejected on the
+// target YB version.
+func testFetchWithTiesIssue(t *testing.T) {
+	ctx := context.Background()
+	conn, err := getConn()
+	assert.NoError(t, err)
+
+	defer conn.Close(context.Background())
+
+	stmts := []string{
+		`SELECT * FROM employees
+	ORDER BY salary DESC
+	FETCH FIRST 2 ROWS WITH TIES;`,
+	}
+
+	for _, stmt := range stmts {
+		_, err = conn.Exec(ctx, stmt)
+		// FIX: was regexFunctionsIssue (copy-paste); this test exercises FETCH ... WITH TIES.
+		assertErrorCorrectlyThrownForIssueForYBVersion(t, err, `syntax error at or near "WITH"`, fetchWithTiesIssue)
+	}
+}
+
+// testCopyOnErrorIssue verifies COPY ... ON_ERROR (PG17) is still rejected on the
+// target YB version.
+func testCopyOnErrorIssue(t *testing.T) {
+	ctx := context.Background()
+	conn, err := getConn()
+	assert.NoError(t, err)
+
+	defer conn.Close(context.Background())
+
+	// In case the COPY ... ON_ERROR construct gets supported in the future, this test will fail with a different error message-something related to the data.csv file not being found.
+ _, err = conn.Exec(ctx, `COPY pg_largeobject (loid, pageno, data) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR IGNORE);`) + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "ERROR: option \"on_error\" not recognized (SQLSTATE 42601)", copyOnErrorIssue) +} + +func testCopyFromWhereIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + // In case the COPY FROM ... WHERE construct gets supported in the future, this test will fail with a different error message-something related to the data.csv file not being found. + _, err = conn.Exec(ctx, `COPY pg_largeobject (loid, pageno, data) FROM '/path/to/data.csv' WHERE loid = 1 WITH (FORMAT csv, HEADER true);`) + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, "ERROR: syntax error at or near \"WHERE\" (SQLSTATE 42601)", copyFromWhereIssue) +} + +func testJsonConstructorFunctions(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + sqls := map[string]string{ + `select json_object('code' VALUE 'P123', 'title': 'Jaws');`: `syntax error at or near "VALUE"`, + `select JSON_ARRAYAGG('[1, "2", null]');`: `does not exist`, + `SELECT json_objectagg(k VALUE v) AS json_result + FROM (VALUES ('a', 1), ('b', 2), ('c', 3)) AS t(k, v);`: `syntax error at or near "VALUE"`, + `SELECT JSON_ARRAY('PostgreSQL', 12, TRUE, NULL) AS json_array;`: `does not exist`, + } + for sql, expectedErr := range sqls { + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, sql) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, expectedErr, jsonConstructorFunctionsIssue) + } +} + +func testJsonPredicateIssue(t *testing.T) { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, `SELECT js, js IS JSON "json?" 
FROM (VALUES ('123'), ('"abc"'), ('{"a": "b"}'), ('[1,2]'),('abc')) foo(js);`)
+
+	// FIX: was jsonConstructorFunctionsIssue (copy-paste); this test exercises the
+	// IS JSON predicate, so assert jsonPredicateIssue.
+	assertErrorCorrectlyThrownForIssueForYBVersion(t, err, `syntax error at or near "JSON"`, jsonPredicateIssue)
+}
+
+// testJsonQueryFunctions verifies the PG17 JSON query functions
+// (JSON_QUERY, JSON_VALUE, JSON_EXISTS, json_table) are still rejected on the
+// target YB version.
+func testJsonQueryFunctions(t *testing.T) {
+	ctx := context.Background()
+	conn, err := getConn()
+	assert.NoError(t, err)
+	// FIX: close once for the whole test instead of deferring inside the loop.
+	defer conn.Close(context.Background())
+	sqls := []string{
+		`SELECT id, JSON_QUERY(details, '$.author') AS author
+FROM books;`,
+		`SELECT
+	id,
+	JSON_VALUE(details, '$.title') AS title,
+	JSON_VALUE(details, '$.price')::NUMERIC AS price
+FROM books;`,
+		`SELECT id, details
+FROM books
+WHERE JSON_EXISTS(details, '$.author');`,
+	}
+	for _, sql := range sqls {
+		_, err = conn.Exec(ctx, sql)
+
+		// FIX: was jsonConstructorFunctionsIssue (copy-paste); these are JSON query
+		// functions, so assert jsonQueryFunctionIssue.
+		assertErrorCorrectlyThrownForIssueForYBVersion(t, err, `does not exist`, jsonQueryFunctionIssue)
+	}
+
+	jsonTableSQL := `SELECT * FROM json_table(
+	'[{"a":10,"b":20},{"a":30,"b":40}]'::jsonb,
+	'$[*]'
+	COLUMNS (
+		column_a int4 path '$.a',
+		column_b int4 path '$.b'
+	)
+	);`
+	_, err = conn.Exec(ctx, jsonTableSQL)
+
+	// FIX: json_table is a JSON query construct as well (was jsonConstructorFunctionsIssue).
+	assertErrorCorrectlyThrownForIssueForYBVersion(t, err, `syntax error at or near "COLUMNS"`, jsonQueryFunctionIssue)
+}
+
+// testMergeStmtIssue verifies MERGE (PG15) is still rejected on the target YB version.
+func testMergeStmtIssue(t *testing.T) {
+	ctx := context.Background()
+	conn, err := getConn()
+	assert.NoError(t, err)
+	sqls := []string{`
+	MERGE INTO customer_account ca
+USING recent_transactions t
+ON t.customer_id = ca.customer_id
+WHEN MATCHED THEN
+	UPDATE SET balance = balance + transaction_value
+WHEN NOT MATCHED THEN
+	INSERT (customer_id, balance)
+	VALUES (t.customer_id, t.transaction_value);
+`,
+		`
+	MERGE INTO wines w
+USING wine_stock_changes s
+ON s.winename = w.winename
+WHEN NOT MATCHED AND s.stock_delta > 0 THEN
+	INSERT VALUES(s.winename, s.stock_delta)
+WHEN MATCHED AND w.stock + s.stock_delta > 0 THEN
+	UPDATE SET stock = w.stock + s.stock_delta
+WHEN MATCHED THEN
+	DELETE
+RETURNING merge_action(), w.*;
+	`, // MERGE ...
RETURNING statement >PG15 feature + } + + for _, sql := range sqls { + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, sql) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, `syntax error at or near "MERGE"`, mergeStatementIssue) + } + +} + +func testAggFunctions(t *testing.T) { + sqls := []string{ + `CREATE TABLE any_value_ex ( + department TEXT, + employee_name TEXT, + salary NUMERIC +); + +INSERT INTO any_value_ex VALUES +('HR', 'Alice', 50000), +('HR', 'Bob', 55000), +('IT', 'Charlie', 60000), +('IT', 'Diana', 62000); + +SELECT + department, + any_value(employee_name) AS any_employee +FROM any_value_ex +GROUP BY department;`, + + `CREATE TABLE events ( + id SERIAL PRIMARY KEY, + event_range daterange +); + +INSERT INTO events (event_range) VALUES + ('[2024-01-01, 2024-01-10]'::daterange), + ('[2024-01-05, 2024-01-15]'::daterange), + ('[2024-01-20, 2024-01-25]'::daterange); + +SELECT range_agg(event_range) AS union_of_ranges +FROM events; + +SELECT range_intersect_agg(event_range) AS intersection_of_ranges +FROM events;`, + } + + for _, sql := range sqls { + ctx := context.Background() + conn, err := getConn() + assert.NoError(t, err) + + defer conn.Close(context.Background()) + _, err = conn.Exec(ctx, sql) + + assertErrorCorrectlyThrownForIssueForYBVersion(t, err, `does not exist`, aggregateFunctionIssue) + } +} + +func TestDMLIssuesInYBVersion(t *testing.T) { + var err error + ybVersion := os.Getenv("YB_VERSION") + if ybVersion == "" { + panic("YB_VERSION env variable is not set. 
Set YB_VERSION=2024.1.3.0-b105 for example") + } + + ybVersionWithoutBuild := strings.Split(ybVersion, "-")[0] + testYbVersion, err = ybversion.NewYBVersion(ybVersionWithoutBuild) + testutils.FatalIfError(t, err) + + testYugabytedbConnStr = os.Getenv("YB_CONN_STR") + if testYugabytedbConnStr == "" { + // spawn yugabytedb container + var err error + ctx := context.Background() + testYugabytedbContainer, err = yugabytedb.Run( + ctx, + "yugabytedb/yugabyte:"+ybVersion, + ) + assert.NoError(t, err) + defer testYugabytedbContainer.Terminate(context.Background()) + } + + // run tests + success := t.Run(fmt.Sprintf("%s-%s", "lo functions", ybVersion), testLOFunctionsIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "regex functions", ybVersion), testRegexFunctionsIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "fetch with ties", ybVersion), testFetchWithTiesIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "copy on error", ybVersion), testCopyOnErrorIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "copy from where", ybVersion), testCopyFromWhereIssue) + assert.True(t, success) + success = t.Run(fmt.Sprintf("%s-%s", "json constructor functions", ybVersion), testJsonConstructorFunctions) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "json query functions", ybVersion), testJsonQueryFunctions) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "merge statement", ybVersion), testMergeStmtIssue) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "json subscripting", ybVersion), testJsonbSubscriptingIssue) + assert.True(t, success) + success = t.Run(fmt.Sprintf("%s-%s", "aggregate functions", ybVersion), testAggFunctions) + assert.True(t, success) + + success = t.Run(fmt.Sprintf("%s-%s", "json type predicate", ybVersion), testJsonPredicateIssue) + assert.True(t, success) + +} diff --git 
a/yb-voyager/src/query/queryissue/parser_issue_detector.go b/yb-voyager/src/query/queryissue/parser_issue_detector.go new file mode 100644 index 0000000000..5fbc47ca8e --- /dev/null +++ b/yb-voyager/src/query/queryissue/parser_issue_detector.go @@ -0,0 +1,468 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queryissue + +import ( + "fmt" + "slices" + "strings" + + "github.com/samber/lo" + log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/query/queryparser" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" +) + +type ParserIssueDetector struct { + /* + this will contain the information in this format: + public.table1 -> { + column1: citext | jsonb | inet | tsquery | tsvector | array + ... + } + schema2.table2 -> { + column3: citext | jsonb | inet | tsquery | tsvector | array + ... 
+ } + Here only those columns on tables are stored which have unsupported type for Index in YB + */ + columnsWithUnsupportedIndexDatatypes map[string]map[string]string + /* + list of composite types with fully qualified typename in the exported schema + */ + compositeTypes []string + /* + list of enum types with fully qualified typename in the exported schema + */ + enumTypes []string + + partitionTablesMap map[string]bool + + // key is partitioned table, value is sqlInfo (sqlstmt, fpath) where the ADD PRIMARY KEY statement resides + primaryConsInAlter map[string]*queryparser.AlterTable + + //Boolean to check if there are any Gin indexes + IsGinIndexPresentInSchema bool + + // Boolean to check if there are any unlogged tables that were filtered + // out because they are fixed as per the target db version + IsUnloggedTablesIssueFiltered bool + + //Functions in exported schema + functionObjects []*queryparser.Function + + //columns names with jsonb type + jsonbColumns []string +} + +func NewParserIssueDetector() *ParserIssueDetector { + return &ParserIssueDetector{ + columnsWithUnsupportedIndexDatatypes: make(map[string]map[string]string), + compositeTypes: make([]string, 0), + enumTypes: make([]string, 0), + partitionTablesMap: make(map[string]bool), + primaryConsInAlter: make(map[string]*queryparser.AlterTable), + } +} + +func (p *ParserIssueDetector) GetCompositeTypes() []string { + return p.compositeTypes +} + +func (p *ParserIssueDetector) GetEnumTypes() []string { + return p.enumTypes +} + +func (p *ParserIssueDetector) GetAllIssues(query string, targetDbVersion *ybversion.YBVersion) ([]QueryIssue, error) { + issues, err := p.getAllIssues(query) + if err != nil { + return issues, err + } + + return p.getIssuesNotFixedInTargetDbVersion(issues, targetDbVersion) +} + +func (p *ParserIssueDetector) getAllIssues(query string) ([]QueryIssue, error) { + plpgsqlIssues, err := p.getPLPGSQLIssues(query) + if err != nil { + return nil, fmt.Errorf("error getting plpgsql 
issues: %v", err)
+	}
+	dmlIssues, err := p.getDMLIssues(query)
+	if err != nil {
+		return nil, fmt.Errorf("error getting generic issues: %v", err)
+	}
+	ddlIssues, err := p.getDDLIssues(query)
+	if err != nil {
+		return nil, fmt.Errorf("error getting ddl issues: %v", err)
+	}
+	return lo.Flatten([][]QueryIssue{plpgsqlIssues, dmlIssues, ddlIssues}), nil
+}
+
+// getIssuesNotFixedInTargetDbVersion drops issues already fixed in targetDbVersion,
+// remembering (via IsUnloggedTablesIssueFiltered) whether an UNLOGGED TABLE issue
+// was filtered out.
+func (p *ParserIssueDetector) getIssuesNotFixedInTargetDbVersion(issues []QueryIssue, targetDbVersion *ybversion.YBVersion) ([]QueryIssue, error) {
+	var filteredIssues []QueryIssue
+	for _, i := range issues {
+		fixed, err := i.IsFixedIn(targetDbVersion)
+		if err != nil {
+			return nil, fmt.Errorf("checking if issue %v is supported: %w", i, err)
+		}
+		if !fixed {
+			filteredIssues = append(filteredIssues, i)
+		} else {
+			if i.Issue.Type == UNLOGGED_TABLE {
+				p.IsUnloggedTablesIssueFiltered = true
+			}
+		}
+	}
+	return filteredIssues, nil
+}
+
+// GetAllPLPGSQLIssues returns the PLPGSQL issues in query that are not yet fixed in
+// targetDbVersion.
+func (p *ParserIssueDetector) GetAllPLPGSQLIssues(query string, targetDbVersion *ybversion.YBVersion) ([]QueryIssue, error) {
+	issues, err := p.getPLPGSQLIssues(query)
+	if err != nil {
+		// FIX: propagate the error; the original returned a nil error here,
+		// silently swallowing failures (sibling GetAllIssues returns err).
+		return issues, err
+	}
+
+	return p.getIssuesNotFixedInTargetDbVersion(issues, targetDbVersion)
+}
+
+func (p *ParserIssueDetector) getPLPGSQLIssues(query string) ([]QueryIssue, error) {
+	parseTree, err := queryparser.Parse(query)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing query: %w", err)
+	}
+
+	if !queryparser.IsPLPGSQLObject(parseTree) {
+		return nil, nil
+	}
+	//TODO handle this in DDLPARSER, DDLIssueDetector
+	objType, objName := queryparser.GetObjectTypeAndObjectName(parseTree)
+	plpgsqlQueries, err := queryparser.GetAllPLPGSQLStatements(query)
+	if err != nil {
+		return nil, fmt.Errorf("error getting all the queries from query: %w", err)
+	}
+	var issues []QueryIssue
+	for _, plpgsqlQuery := range plpgsqlQueries {
+		issuesInQuery, err := p.getAllIssues(plpgsqlQuery)
+		if err != nil {
+			//there can be plpgsql expr queries no parseable via parser e.g. 
"withdrawal > balance" + log.Errorf("error getting issues in query-%s: %v", query, err) + continue + } + issues = append(issues, issuesInQuery...) + } + + percentTypeSyntaxIssues, err := p.GetPercentTypeSyntaxIssues(query) + if err != nil { + return nil, fmt.Errorf("error getting reference TYPE syntax issues: %v", err) + } + issues = append(issues, percentTypeSyntaxIssues...) + + return lo.Map(issues, func(i QueryIssue, _ int) QueryIssue { + //Replacing the objectType and objectName to the original ObjectType and ObjectName of the PLPGSQL object + //e.g. replacing the DML_QUERY and "" to FUNCTION and + i.ObjectType = objType + i.ObjectName = objName + return i + }), nil +} + +func (p *ParserIssueDetector) ParseRequiredDDLs(query string) error { + parseTree, err := queryparser.Parse(query) + if err != nil { + return fmt.Errorf("error parsing a query: %v", err) + } + ddlObj, err := queryparser.ProcessDDL(parseTree) + if err != nil { + return fmt.Errorf("error parsing DDL: %w", err) + } + + switch ddlObj.(type) { + case *queryparser.AlterTable: + alter, _ := ddlObj.(*queryparser.AlterTable) + if alter.ConstraintType == queryparser.PRIMARY_CONSTR_TYPE { + //For the case ALTER and CREATE are not not is expected order where ALTER is before CREATE + alter.Query = query + p.primaryConsInAlter[alter.GetObjectName()] = alter + } + case *queryparser.Table: + table, _ := ddlObj.(*queryparser.Table) + if table.IsPartitioned { + p.partitionTablesMap[table.GetObjectName()] = true + } + + for _, col := range table.Columns { + isUnsupportedType := slices.Contains(UnsupportedIndexDatatypes, col.TypeName) + isUDTType := slices.Contains(p.compositeTypes, col.GetFullTypeName()) + switch true { + case col.IsArrayType: + //For Array types and storing the type as "array" as of now we can enhance the to have specific type e.g. 
INT4ARRAY
+			_, ok := p.columnsWithUnsupportedIndexDatatypes[table.GetObjectName()]
+			if !ok {
+				p.columnsWithUnsupportedIndexDatatypes[table.GetObjectName()] = make(map[string]string)
+			}
+			p.columnsWithUnsupportedIndexDatatypes[table.GetObjectName()][col.ColumnName] = "array"
+		case isUnsupportedType || isUDTType:
+			_, ok := p.columnsWithUnsupportedIndexDatatypes[table.GetObjectName()]
+			if !ok {
+				p.columnsWithUnsupportedIndexDatatypes[table.GetObjectName()] = make(map[string]string)
+			}
+			p.columnsWithUnsupportedIndexDatatypes[table.GetObjectName()][col.ColumnName] = col.TypeName
+			if isUDTType { //For UDTs
+				p.columnsWithUnsupportedIndexDatatypes[table.GetObjectName()][col.ColumnName] = "user_defined_type"
+			}
+		}
+
+		if col.TypeName == "jsonb" {
+			// used to detect the jsonb subscripting happening on these columns
+			p.jsonbColumns = append(p.jsonbColumns, col.ColumnName)
+		}
+	}
+
+	case *queryparser.CreateType:
+		typeObj, _ := ddlObj.(*queryparser.CreateType)
+		if typeObj.IsEnum {
+			p.enumTypes = append(p.enumTypes, typeObj.GetObjectName())
+		} else {
+			p.compositeTypes = append(p.compositeTypes, typeObj.GetObjectName())
+		}
+	case *queryparser.Index:
+		index, _ := ddlObj.(*queryparser.Index)
+		if index.AccessMethod == GIN_ACCESS_METHOD {
+			p.IsGinIndexPresentInSchema = true
+		}
+	case *queryparser.Function:
+		fn, _ := ddlObj.(*queryparser.Function)
+		p.functionObjects = append(p.functionObjects, fn)
+	}
+	return nil
+}
+
+// GetDDLIssues returns the DDL issues in query that are not yet fixed in
+// targetDbVersion.
+func (p *ParserIssueDetector) GetDDLIssues(query string, targetDbVersion *ybversion.YBVersion) ([]QueryIssue, error) {
+	issues, err := p.getDDLIssues(query)
+	if err != nil {
+		// FIX: propagate the error; the original returned a nil error here,
+		// silently swallowing failures (sibling GetAllIssues returns err).
+		return issues, err
+	}
+
+	return p.getIssuesNotFixedInTargetDbVersion(issues, targetDbVersion)
+
+}
+
+func (p *ParserIssueDetector) getDDLIssues(query string) ([]QueryIssue, error) {
+	parseTree, err := queryparser.Parse(query)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing a query: %v", err)
+	}
+	isDDL, err := queryparser.IsDDL(parseTree)
+	if err !=
nil { + return nil, fmt.Errorf("error checking if query is ddl: %w", err) + } + if !isDDL { + return nil, nil + } + // Parse the query into a DDL object + ddlObj, err := queryparser.ProcessDDL(parseTree) + if err != nil { + return nil, fmt.Errorf("error parsing DDL: %w", err) + } + // Get the appropriate issue detector + detector, err := p.GetDDLDetector(ddlObj) + if err != nil { + return nil, fmt.Errorf("error getting issue detector: %w", err) + } + + // Detect issues + issues, err := detector.DetectIssues(ddlObj) + if err != nil { + return nil, fmt.Errorf("error detecting issues: %w", err) + } + + // Add the original query to each issue + for i := range issues { + if issues[i].SqlStatement == "" { + issues[i].SqlStatement = query + } + } + + /* + For detecting these generic issues (Advisory locks, XML functions and System columns as of now) on DDL example - + CREATE INDEX idx_invoices on invoices (xpath('/invoice/customer/text()', data)); + We need to call it on DDLs as well + */ + genericIssues, err := p.genericIssues(query) + if err != nil { + return nil, fmt.Errorf("error getting generic issues: %w", err) + } + + for _, i := range genericIssues { + //In case of genericIssues we don't populate the proper obj type and obj name + i.ObjectType = ddlObj.GetObjectType() + i.ObjectName = ddlObj.GetObjectName() + issues = append(issues, i) + } + return issues, nil +} + +func (p *ParserIssueDetector) GetPercentTypeSyntaxIssues(query string) ([]QueryIssue, error) { + parseTree, err := queryparser.Parse(query) + if err != nil { + return nil, fmt.Errorf("error parsing the query-%s: %v", query, err) + } + + objType, objName := queryparser.GetObjectTypeAndObjectName(parseTree) + typeNames, err := queryparser.GetAllTypeNamesInPlpgSQLStmt(query) + if err != nil { + return nil, fmt.Errorf("error getting type names in PLPGSQL: %v", err) + } + + /* + Caveats of GetAllTypeNamesInPlpgSQLStmt(): + 1. 
Not returning typename for variables in function parameter from this function (in correct in json as UNKNOWN), for that using the GetTypeNamesFromFuncParameters() + 2. Not returning the return type from this function (not available in json), for that using the GetReturnTypeOfFunc() + */ + if queryparser.IsFunctionObject(parseTree) { + typeNames = append(typeNames, queryparser.GetReturnTypeOfFunc(parseTree)) + } + typeNames = append(typeNames, queryparser.GetFuncParametersTypeNames(parseTree)...) + var issues []QueryIssue + for _, typeName := range typeNames { + if strings.HasSuffix(typeName, "%TYPE") { + issues = append(issues, NewPercentTypeSyntaxIssue(objType, objName, typeName)) // TODO: confirm + } + } + return issues, nil +} + +func (p *ParserIssueDetector) GetDMLIssues(query string, targetDbVersion *ybversion.YBVersion) ([]QueryIssue, error) { + issues, err := p.getDMLIssues(query) + if err != nil { + return issues, err + } + + return p.getIssuesNotFixedInTargetDbVersion(issues, targetDbVersion) +} + +func (p *ParserIssueDetector) getDMLIssues(query string) ([]QueryIssue, error) { + parseTree, err := queryparser.Parse(query) + if err != nil { + return nil, fmt.Errorf("error parsing query: %w", err) + } + isDDL, err := queryparser.IsDDL(parseTree) + if err != nil { + return nil, fmt.Errorf("error checking if query is a DDL: %v", err) + } + if isDDL { + //Skip all the DDLs coming to this function + return nil, nil + } + issues, err := p.genericIssues(query) + if err != nil { + return issues, err + } + return issues, err +} + +func (p *ParserIssueDetector) genericIssues(query string) ([]QueryIssue, error) { + parseTree, err := queryparser.Parse(query) + if err != nil { + return nil, fmt.Errorf("error parsing query: %w", err) + } + var result []QueryIssue + visited := make(map[protoreflect.Message]bool) + detectors := []UnsupportedConstructDetector{ + NewFuncCallDetector(query), + NewColumnRefDetector(query), + NewXmlExprDetector(query), + 
NewRangeTableFuncDetector(query), + NewSelectStmtDetector(query), + NewCopyCommandUnsupportedConstructsDetector(query), + NewJsonConstructorFuncDetector(query), + NewJsonQueryFunctionDetector(query), + NewMergeStatementDetector(query), + NewJsonbSubscriptingDetector(query, p.jsonbColumns, p.getJsonbReturnTypeFunctions()), + NewUniqueNullsNotDistinctDetector(query), + NewJsonPredicateExprDetector(query), + } + + processor := func(msg protoreflect.Message) error { + for _, detector := range detectors { + log.Debugf("running detector %T", detector) + err := detector.Detect(msg) + if err != nil { + log.Debugf("error in detector %T: %v", detector, err) + return fmt.Errorf("error in detectors %T: %w", detector, err) + } + } + return nil + } + + parseTreeProtoMsg := queryparser.GetProtoMessageFromParseTree(parseTree) + err = queryparser.TraverseParseTree(parseTreeProtoMsg, visited, processor) + if err != nil { + return result, fmt.Errorf("error traversing parse tree message: %w", err) + } + + xmlIssueAdded := false + for _, detector := range detectors { + issues := detector.GetIssues() + for _, issue := range issues { + if issue.Type == XML_FUNCTIONS { + if xmlIssueAdded { + // currently, both FuncCallDetector and XmlExprDetector can detect XMLFunctionsIssue + // but we want to only return one XMLFunctionsIssue. + // TODO: refactor to avoid this + // Possible Solutions: + // 1. Have a dedicated detector for XMLFunctions and Expressions so that a single issue is returned + // 2. Separate issue types for XML Functions and XML expressions. + continue + } else { + xmlIssueAdded = true + } + } + result = append(result, issue) + } + } + + return result, nil +} + +func (p *ParserIssueDetector) getJsonbReturnTypeFunctions() []string { + var jsonbFunctions []string + jsonbColumns := p.jsonbColumns + for _, function := range p.functionObjects { + returnType := function.ReturnType + if strings.HasSuffix(returnType, "%TYPE") { + // e.g. 
public.table_name.column%TYPE + qualifiedColumn := strings.TrimSuffix(returnType, "%TYPE") + parts := strings.Split(qualifiedColumn, ".") + column := parts[len(parts)-1] + if slices.Contains(jsonbColumns, column) { + jsonbFunctions = append(jsonbFunctions, function.FuncName) + } + } else { + // e.g. public.udt_type, text, trigger, jsonb + parts := strings.Split(returnType, ".") + typeName := parts[len(parts)-1] + if typeName == "jsonb" { + jsonbFunctions = append(jsonbFunctions, function.FuncName) + } + } + } + jsonbFunctions = append(jsonbFunctions, catalogFunctionsReturningJsonb.ToSlice()...) + return jsonbFunctions +} diff --git a/yb-voyager/src/query/queryissue/parser_issue_detector_test.go b/yb-voyager/src/query/queryissue/parser_issue_detector_test.go new file mode 100644 index 0000000000..744abc25f8 --- /dev/null +++ b/yb-voyager/src/query/queryissue/parser_issue_detector_test.go @@ -0,0 +1,1042 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
package queryissue

import (
	"fmt"
	"slices"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/samber/lo"
	"github.com/stretchr/testify/assert"

	"github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion"
	testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils"
)

// SQL fixtures shared by the tests below. Each statement exercises one or
// more unsupported constructs that the ParserIssueDetector should flag
// (PL/pgSQL %TYPE syntax, advisory locks, system columns, XML functions,
// storage parameters, partitioning limitations, collation options, etc.).
const (
	// PL/pgSQL function combining %TYPE declarations, CLUSTER ON, advisory
	// locks, xpath() and the xmax system column.
	stmt1 = `CREATE OR REPLACE FUNCTION list_high_earners(threshold NUMERIC) RETURNS public.emp1.salary%TYPE AS $$
DECLARE
    emp_name employees.name%TYPE;
    emp_salary employees.salary%TYPE;
BEGIN
    FOR emp_name, emp_salary IN
        SELECT name, salary FROM employees WHERE salary > threshold
    LOOP
        RAISE NOTICE 'Employee: %, Salary: %', emp_name, emp_salary;
    END LOOP;
    EXECUTE 'ALTER TABLE employees CLUSTER ON idx;';
    PERFORM pg_advisory_unlock(sender_id);
    PERFORM pg_advisory_unlock(receiver_id);

    -- Conditional logic
    IF balance >= withdrawal THEN
        RAISE NOTICE 'Sufficient balance, processing withdrawal.';
        -- Add the amount to the receiver's account
        UPDATE accounts SET balance = balance + amount WHERE account_id = receiver;
    ELSIF balance > 0 AND balance < withdrawal THEN
        RAISE NOTICE 'Insufficient balance, consider reducing the amount.';
        -- Add the amount to the receiver's account
        UPDATE accounts SET balance = balance + amount WHERE account_id = receiver;
    ELSE
        -- Add the amount to the receiver's account
        UPDATE accounts SET balance = balance + amount WHERE account_id = receiver;
        RAISE NOTICE 'No funds available.';
    END IF;

    SELECT id, xpath('/person/name/text()', data) AS name FROM test_xml_type;

    SELECT * FROM employees e WHERE e.xmax = (SELECT MAX(xmax) FROM employees WHERE department = e.department);
    RETURN emp_salary;

END;
$$ LANGUAGE plpgsql;`
	// PL/pgSQL function whose EXECUTE'd DDL strings carry storage parameters,
	// UNLOGGED tables and gin/gist index issues.
	stmt2 = `CREATE OR REPLACE FUNCTION process_order(orderid orders.id%TYPE) RETURNS VOID AS $$
DECLARE
    lock_acquired BOOLEAN;
BEGIN
    lock_acquired := pg_try_advisory_lock(orderid); -- not able to report this as it is an assignment statement TODO: fix when support this

    IF NOT lock_acquired THEN
        RAISE EXCEPTION 'Order % already being processed by another session', orderid;
    END IF;

    UPDATE orders
    SET processed_at = NOW()
    WHERE orders.order_id = orderid;

    RAISE NOTICE 'Order % processed successfully', orderid;

    EXECUTE 'ALTER TABLE ONLY public.example ADD CONSTRAINT example_email_key UNIQUE (email) WITH (fillfactor=70)';

    EXECUTE 'CREATE UNLOGGED TABLE tbl_unlog (id int, val text);';

    EXECUTE 'CREATE INDEX idx_example ON example_table USING gin(name, name1);';

    EXECUTE 'CREATE INDEX idx_example ON schema1.example_table USING gist(name);';

    PERFORM pg_advisory_unlock(orderid);
END;
$$ LANGUAGE plpgsql;`
	stmt3 = `CREATE INDEX abc ON public.example USING btree (new_id) WITH (fillfactor='70');`
	stmt4 = `ALTER TABLE public.example DISABLE RULE example_rule;`
	stmt5 = `ALTER TABLE abc ADD CONSTRAINT cnstr_id UNIQUE (id) DEFERRABLE;`
	stmt6 = `SELECT id, first_name FROM employees WHERE pg_try_advisory_lock(600) IS TRUE AND salary > 700;`
	stmt7 = `SELECT xmin, COUNT(*) FROM employees GROUP BY xmin HAVING COUNT(*) > 1;`
	stmt8 = `SELECT id, xml_column, xpath('/root/element/@attribute', xml_column) as xpath_resuls FROM xml_documents;`
	stmt9 = `CREATE TABLE order_details (
    detail_id integer NOT NULL,
    quantity integer,
    price_per_unit numeric,
    amount numeric GENERATED ALWAYS AS (((quantity)::numeric * price_per_unit)) STORED
);`
	stmt10 = `CREATE TABLE test_non_pk_multi_column_list (
    id numeric NOT NULL PRIMARY KEY,
    country_code varchar(3),
    record_type varchar(5),
    descriptions varchar(50)
) PARTITION BY LIST (country_code, record_type) ;`

	stmt11 = `CREATE TABLE "Test"(
    id int,
    room_id int,
    time_range trange,
    roomid int,
    timerange tsrange,
    EXCLUDE USING gist (room_id WITH =, time_range WITH &&),
    CONSTRAINT no_time_overlap_constr EXCLUDE USING gist (roomid WITH =, timerange WITH &&)
);`
	stmt12 = `CREATE TABLE test_dt (id int, d daterange);`
	stmt13 = `CREATE INDEX idx_on_daterange on test_dt (d);`
	stmt14 = `CREATE MATERIALIZED VIEW public.sample_data_view AS
 SELECT sample_data.id,
    sample_data.name,
    sample_data.description,
    XMLFOREST(sample_data.name AS name, sample_data.description AS description) AS xml_data,
    pg_try_advisory_lock((sample_data.id)::bigint) AS lock_acquired,
    sample_data.ctid AS row_ctid,
    sample_data.xmin AS xmin_value
   FROM public.sample_data
  WITH NO DATA;`
	stmt15 = `CREATE VIEW public.orders_view AS
 SELECT orders.order_id,
    orders.customer_name,
    orders.product_name,
    orders.quantity,
    orders.price,
    XMLELEMENT(NAME "OrderDetails", XMLELEMENT(NAME "Customer", orders.customer_name), XMLELEMENT(NAME "Product", orders.product_name), XMLELEMENT(NAME "Quantity", orders.quantity), XMLELEMENT(NAME "TotalPrice", (orders.price * (orders.quantity)::numeric))) AS order_xml,
    XMLCONCAT(XMLELEMENT(NAME "Customer", orders.customer_name), XMLELEMENT(NAME "Product", orders.product_name)) AS summary_xml,
    pg_try_advisory_lock((hashtext((orders.customer_name || orders.product_name)))::bigint) AS lock_acquired,
    orders.ctid AS row_ctid,
    orders.xmin AS transaction_id
   FROM public.orders
  WITH LOCAL CHECK OPTION;`
	stmt16 = `CREATE TABLE public.xml_data_example (
    id SERIAL PRIMARY KEY,
    name VARCHAR(255),
    d daterange Unique,
    description XML DEFAULT xmlparse(document 'Default Product100.00Electronics'),
    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
) PARTITION BY LIST(id, name);`
	stmt17 = `ALTER TABLE invoices
ADD CONSTRAINT valid_invoice_structure
CHECK (xpath_exists('/invoice/customer', data));`
	stmt18 = `CREATE INDEX idx_invoices on invoices (xpath('/invoice/customer/text()', data));`
	stmt19 = `create table test_lo_default (id int, raster lo DEFAULT lo_import('3242'));`
	stmt20 = `CREATE VIEW public.view_explicit_security_invoker
  WITH (security_invoker = true) AS
  SELECT employee_id, first_name
  FROM public.employees;`
	stmt21 = `CREATE COLLATION case_insensitive (provider = icu, locale = 'und-u-ks-level2', deterministic = false);`
	stmt22 = `CREATE COLLATION new_schema.ignore_accents (provider = icu, locale = 'und-u-ks-level1-kc-true', deterministic = false);`
	stmt23 = `CREATE COLLATION upperfirst (provider = icu, locale = 'en-u-kf-upper', deterministic = true);`
	// No deterministic option — expected to produce no issue (see TestDDLIssues).
	stmt24 = `CREATE COLLATION special (provider = icu, locale = 'en-u-kf-upper-kr-grek-latn');`
	stmt25 = `CREATE TABLE public.products (
        id INTEGER PRIMARY KEY,
        product_name VARCHAR(100),
        serial_number TEXT,
        UNIQUE NULLS NOT DISTINCT (product_name, serial_number)
    );`
	stmt26 = `ALTER TABLE public.products ADD CONSTRAINT unique_product_name UNIQUE NULLS NOT DISTINCT (product_name);`
	stmt27 = `CREATE UNIQUE INDEX unique_email_idx ON users (email) NULLS NOT DISTINCT;`
)

// modifiedIssuesforPLPGSQL overwrites the object type/name of each expected
// issue, mirroring what the production code does for issues detected inside
// PL/pgSQL bodies (the issue is attributed to the enclosing function, not to
// the extracted statement).
func modifiedIssuesforPLPGSQL(issues []QueryIssue, objType string, objName string) []QueryIssue {
	return lo.Map(issues, func(i QueryIssue, _ int) QueryIssue {
		i.ObjectType = objType
		i.ObjectName = objName
		return i
	})
}
// TestAllIssues verifies GetAllIssues end-to-end over a mix of DDL, DML and
// PL/pgSQL fixtures: every expected issue must be present and the total issue
// count must match exactly.
func TestAllIssues(t *testing.T) {
	// stmt12 must be parsed first so the detector knows test_dt.d is a daterange
	// (needed for the stmt13 index-on-complex-datatype expectation).
	requiredDDLs := []string{stmt12}
	parserIssueDetector := NewParserIssueDetector()
	stmtsWithExpectedIssues := map[string][]QueryIssue{
		stmt1: []QueryIssue{
			NewPercentTypeSyntaxIssue("FUNCTION", "list_high_earners", "public.emp1.salary%TYPE"),
			NewPercentTypeSyntaxIssue("FUNCTION", "list_high_earners", "employees.name%TYPE"),
			NewPercentTypeSyntaxIssue("FUNCTION", "list_high_earners", "employees.salary%TYPE"),
			NewClusterONIssue("TABLE", "employees", "ALTER TABLE employees CLUSTER ON idx;"),
			NewAdvisoryLocksIssue("DML_QUERY", "", "SELECT pg_advisory_unlock(sender_id);"),
			NewAdvisoryLocksIssue("DML_QUERY", "", "SELECT pg_advisory_unlock(receiver_id);"),
			NewXmlFunctionsIssue("DML_QUERY", "", "SELECT id, xpath('/person/name/text()', data) AS name FROM test_xml_type;"),
			NewSystemColumnsIssue("DML_QUERY", "", "SELECT * FROM employees e WHERE e.xmax = (SELECT MAX(xmax) FROM employees WHERE department = e.department);"),
		},
		stmt2: []QueryIssue{
			NewPercentTypeSyntaxIssue("FUNCTION", "process_order", "orders.id%TYPE"),
			NewStorageParameterIssue("TABLE", "public.example", "ALTER TABLE ONLY public.example ADD CONSTRAINT example_email_key UNIQUE (email) WITH (fillfactor=70);"),
			NewMultiColumnGinIndexIssue("INDEX", "idx_example ON example_table", "CREATE INDEX idx_example ON example_table USING gin(name, name1);"),
			NewUnsupportedIndexMethodIssue("INDEX", "idx_example ON schema1.example_table", "CREATE INDEX idx_example ON schema1.example_table USING gist(name);", "gist"),
			NewAdvisoryLocksIssue("DML_QUERY", "", "SELECT pg_advisory_unlock(orderid);"),
		},
		stmt3: []QueryIssue{
			NewStorageParameterIssue("INDEX", "abc ON public.example", stmt3),
		},
		stmt4: []QueryIssue{
			NewAlterTableDisableRuleIssue("TABLE", "public.example", stmt4, "example_rule"),
		},
		stmt5: []QueryIssue{
			NewDeferrableConstraintIssue("TABLE", "abc", stmt5, "cnstr_id"),
		},
		stmt6: []QueryIssue{
			NewAdvisoryLocksIssue("DML_QUERY", "", stmt6),
		},
		stmt7: []QueryIssue{
			NewSystemColumnsIssue("DML_QUERY", "", stmt7),
		},
		stmt8: []QueryIssue{
			NewXmlFunctionsIssue("DML_QUERY", "", stmt8),
		},
		stmt9: []QueryIssue{
			NewGeneratedColumnsIssue("TABLE", "order_details", stmt9, []string{"amount"}),
		},
		stmt10: []QueryIssue{
			NewMultiColumnListPartition("TABLE", "test_non_pk_multi_column_list", stmt10),
			NewInsufficientColumnInPKForPartition("TABLE", "test_non_pk_multi_column_list", stmt10, []string{"country_code", "record_type"}),
		},
		stmt11: []QueryIssue{
			NewExclusionConstraintIssue("TABLE", "Test", stmt11, "Test_room_id_time_range_excl"),
			NewExclusionConstraintIssue("TABLE", "Test", stmt11, "no_time_overlap_constr"),
		},
		stmt13: []QueryIssue{
			NewIndexOnComplexDatatypesIssue("INDEX", "idx_on_daterange ON test_dt", stmt13, "daterange"),
		},
	}

	//Should modify it in similar way we do it actual code as the particular DDL issue in plpgsql can have different Details map on the basis of objectType
	stmtsWithExpectedIssues[stmt1] = modifiedIssuesforPLPGSQL(stmtsWithExpectedIssues[stmt1], "FUNCTION", "list_high_earners")

	stmtsWithExpectedIssues[stmt2] = modifiedIssuesforPLPGSQL(stmtsWithExpectedIssues[stmt2], "FUNCTION", "process_order")

	for _, stmt := range requiredDDLs {
		err := parserIssueDetector.ParseRequiredDDLs(stmt)
		assert.NoError(t, err, "Error parsing required ddl: %s", stmt)
	}
	for stmt, expectedIssues := range stmtsWithExpectedIssues {
		issues, err := parserIssueDetector.GetAllIssues(stmt, ybversion.LatestStable)
		assert.NoError(t, err, "Error detecting issues for statement: %s", stmt)

		// Exact count match plus per-issue membership: together these prove
		// the detected set equals the expected set (cmp.Equal compares all fields).
		assert.Equal(t, len(expectedIssues), len(issues), "Mismatch in issue count for statement: %s", stmt)
		for _, expectedIssue := range expectedIssues {
			found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool {
				return cmp.Equal(expectedIssue, queryIssue)
			})
			assert.True(t, found, "Expected issue not found: %v in statement: %s", expectedIssue, stmt)
		}
	}

}
// TestDDLIssues verifies GetDDLIssues over view/mview/table/collation/index
// fixtures (stmt14..stmt27). stmt16 is pre-parsed so column metadata (e.g. the
// daterange unique column) is available when detecting its own issues.
func TestDDLIssues(t *testing.T) {
	requiredDDLs := []string{stmt16}
	parserIssueDetector := NewParserIssueDetector()
	stmtsWithExpectedIssues := map[string][]QueryIssue{
		stmt14: []QueryIssue{
			NewAdvisoryLocksIssue("MVIEW", "public.sample_data_view", stmt14),
			NewSystemColumnsIssue("MVIEW", "public.sample_data_view", stmt14),
			NewXmlFunctionsIssue("MVIEW", "public.sample_data_view", stmt14),
		},
		stmt15: []QueryIssue{
			NewAdvisoryLocksIssue("VIEW", "public.orders_view", stmt15),
			NewSystemColumnsIssue("VIEW", "public.orders_view", stmt15),
			NewXmlFunctionsIssue("VIEW", "public.orders_view", stmt15),
			//TODO: Add CHECK OPTION issue when we move it from regex to parser logic
		},
		stmt16: []QueryIssue{
			NewXmlFunctionsIssue("TABLE", "public.xml_data_example", stmt16),
			NewPrimaryOrUniqueConsOnUnsupportedIndexTypesIssue("TABLE", "public.xml_data_example", stmt16, "daterange", "xml_data_example_d_key"),
			NewMultiColumnListPartition("TABLE", "public.xml_data_example", stmt16),
			NewInsufficientColumnInPKForPartition("TABLE", "public.xml_data_example", stmt16, []string{"name"}),
			NewXMLDatatypeIssue("TABLE", "public.xml_data_example", stmt16, "description"),
		},
		stmt17: []QueryIssue{
			NewXmlFunctionsIssue("TABLE", "invoices", stmt17),
		},
		stmt18: []QueryIssue{
			NewXmlFunctionsIssue("INDEX", "idx_invoices ON invoices", stmt18),
		},
		stmt19: []QueryIssue{
			NewLODatatypeIssue("TABLE", "test_lo_default", stmt19, "raster"),
			NewLOFuntionsIssue("TABLE", "test_lo_default", stmt19, []string{"lo_import"}),
		},
		stmt20: []QueryIssue{
			NewSecurityInvokerViewIssue("VIEW", "public.view_explicit_security_invoker", stmt20),
		},
		stmt21: []QueryIssue{
			NewDeterministicOptionCollationIssue("COLLATION", "case_insensitive", stmt21),
		},
		stmt22: []QueryIssue{
			NewDeterministicOptionCollationIssue("COLLATION", "new_schema.ignore_accents", stmt22),
		},
		stmt23: []QueryIssue{
			NewDeterministicOptionCollationIssue("COLLATION", "upperfirst", stmt23),
		},
		// stmt24 omits the deterministic option entirely, so no issue is expected.
		stmt24: []QueryIssue{},
		stmt25: []QueryIssue{
			NewUniqueNullsNotDistinctIssue("TABLE", "public.products", stmt25),
		},
		stmt26: []QueryIssue{
			NewUniqueNullsNotDistinctIssue("TABLE", "public.products", stmt26),
		},
		stmt27: []QueryIssue{
			NewUniqueNullsNotDistinctIssue("INDEX", "unique_email_idx ON users", stmt27),
		},
	}
	for _, stmt := range requiredDDLs {
		err := parserIssueDetector.ParseRequiredDDLs(stmt)
		assert.NoError(t, err, "Error parsing required ddl: %s", stmt)
	}
	for stmt, expectedIssues := range stmtsWithExpectedIssues {
		issues, err := parserIssueDetector.GetDDLIssues(stmt, ybversion.LatestStable)
		assert.NoError(t, err, "Error detecting issues for statement: %s", stmt)

		assert.Equal(t, len(expectedIssues), len(issues), "Mismatch in issue count for statement: %s", stmt)
		for _, expectedIssue := range expectedIssues {
			found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool {
				return cmp.Equal(expectedIssue, queryIssue)
			})
			assert.True(t, found, "Expected issue not found: %v in statement: %s. \nFound: %v", expectedIssue, stmt, issues)
		}
	}
}

// TestUnloggedTableIssueReportedInOlderVersion checks version gating: the
// UNLOGGED TABLE issue is fixed in the latest stable YB version (no issue),
// but is still reported when targeting 2024.1.0.0.
func TestUnloggedTableIssueReportedInOlderVersion(t *testing.T) {
	stmt := "CREATE UNLOGGED TABLE tbl_unlog (id int, val text);"
	parserIssueDetector := NewParserIssueDetector()

	// Not reported by default
	issues, err := parserIssueDetector.GetDDLIssues(stmt, ybversion.LatestStable)
	testutils.FatalIfError(t, err)
	assert.Equal(t, 0, len(issues))

	// older version should report the issue
	issues, err = parserIssueDetector.GetDDLIssues(stmt, ybversion.V2024_1_0_0)
	testutils.FatalIfError(t, err)
	assert.Equal(t, 1, len(issues))
	assert.True(t, cmp.Equal(issues[0], NewUnloggedTableIssue("TABLE", "tbl_unlog", stmt)))
}
// TestLargeObjectIssues verifies detection of large-object function usage
// (lo_unlink, lo_import, lo_export, lo_close, lo_put, lo_manage) and the `lo`
// datatype, both inside PL/pgSQL bodies and in a trigger definition.
// Note: assignment statements like `loid := lo_import(...)` are known to be
// missed (see the NOT DETECTED marker in the fixture).
func TestLargeObjectIssues(t *testing.T) {
	sqls := []string{
		`CREATE OR REPLACE FUNCTION manage_large_object(loid OID) RETURNS VOID AS $$
BEGIN
    IF loid IS NOT NULL THEN
        -- Unlink the large object to free up storage
        PERFORM lo_unlink(loid);
    END IF;
END;
$$ LANGUAGE plpgsql;`,
		`CREATE OR REPLACE FUNCTION import_file_to_table(file_path TEXT, doc_title TEXT)
RETURNS VOID AS $$
DECLARE
    loid OID;
BEGIN
    -- Import the file and get the large object OID
    loid := lo_import(file_path); -- NOT DETECTED

    -- Insert the file metadata and OID into the table
    INSERT INTO documents (title, content_oid) VALUES (doc_title, lo_import(file_path));

    RAISE NOTICE 'File imported with OID % and linked to title %', loid, doc_title;
END;
$$ LANGUAGE plpgsql;
`,
		`CREATE OR REPLACE FUNCTION export_large_object(doc_title TEXT, file_path TEXT)
RETURNS VOID AS $$
DECLARE
    loid OID;
BEGIN
    -- Retrieve the OID of the large object associated with the given title
    SELECT content_oid INTO loid FROM documents WHERE title = doc_title;

    -- Check if the large object exists
    IF loid IS NULL THEN
        RAISE EXCEPTION 'No large object found for title %', doc_title;
    END IF;

    -- Export the large object to the specified file
    PERFORM lo_export(loid, file_path);

    RAISE NOTICE 'Large object with OID % exported to %', loid, file_path;
END;
$$ LANGUAGE plpgsql;
`,
		`CREATE OR REPLACE PROCEDURE read_large_object(doc_title TEXT)
AS $$
DECLARE
    loid OID;
    fd INTEGER;
    buffer BYTEA;
    content TEXT;
BEGIN
    -- Retrieve the OID of the large object associated with the given title
    SELECT content_oid INTO loid FROM documents WHERE title = doc_title;

    -- Check if the large object exists
    IF loid IS NULL THEN
        RAISE EXCEPTION 'No large object found for title %', doc_title;
    END IF;

    -- Open the large object for reading
    fd := lo_open(loid, 262144); -- 262144 = INV_READ

    -- Read data from the large object
    buffer := lo_get(fd);
    content := convert_from(buffer, 'UTF8');

    -- Close the large object
    PERFORM lo_close(fd);

END;
$$ LANGUAGE plpgsql;
`,
		`CREATE OR REPLACE FUNCTION write_to_large_object(doc_title TEXT, new_data TEXT)
RETURNS VOID AS $$
DECLARE
    loid OID;
    fd INTEGER;
BEGIN
    -- Create the table if it doesn't already exist
    EXECUTE 'CREATE TABLE IF NOT EXISTS test_large_objects(id INT, raster lo DEFAULT lo_import(3242));';

    -- Retrieve the OID of the large object associated with the given title
    SELECT content_oid INTO loid FROM documents WHERE title = doc_title;

    -- Check if the large object exists
    IF loid IS NULL THEN
        RAISE EXCEPTION 'No large object found for title %', doc_title;
    END IF;

    -- Open the large object for writing
    fd := lo_open(loid, 524288); -- 524288 = INV_WRITE

    -- Write new data to the large object
    PERFORM lo_put(fd, convert_to(new_data, 'UTF8'));

    -- Close the large object
    PERFORM lo_close(fd);

    RAISE NOTICE 'Data written to large object with OID %', loid;
END;
$$ LANGUAGE plpgsql;
`,
		`CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON image
    FOR EACH ROW EXECUTE FUNCTION lo_manage(raster);`,
	}

	expectedSQLsWithIssues := map[string][]QueryIssue{
		sqls[0]: []QueryIssue{
			NewLOFuntionsIssue("DML_QUERY", "", "SELECT lo_unlink(loid);", []string{"lo_unlink"}),
		},
		sqls[1]: []QueryIssue{
			NewLOFuntionsIssue("DML_QUERY", "", "INSERT INTO documents (title, content_oid) VALUES (doc_title, lo_import(file_path));", []string{"lo_import"}),
		},
		sqls[2]: []QueryIssue{
			NewLOFuntionsIssue("DML_QUERY", "", "SELECT lo_export(loid, file_path);", []string{"lo_export"}),
		},
		sqls[3]: []QueryIssue{
			NewLOFuntionsIssue("DML_QUERY", "", "SELECT lo_close(fd);", []string{"lo_close"}),
		},
		sqls[4]: []QueryIssue{
			NewLOFuntionsIssue("DML_QUERY", "", "SELECT lo_put(fd, convert_to(new_data, 'UTF8'));", []string{"lo_put"}),
			NewLOFuntionsIssue("DML_QUERY", "", "SELECT lo_close(fd);", []string{"lo_close"}),
			NewLODatatypeIssue("TABLE", "test_large_objects", "CREATE TABLE IF NOT EXISTS test_large_objects(id INT, raster lo DEFAULT lo_import(3242));", "raster"),
			NewLOFuntionsIssue("TABLE", "test_large_objects", "CREATE TABLE IF NOT EXISTS test_large_objects(id INT, raster lo DEFAULT lo_import(3242));", []string{"lo_import"}),
		},
		sqls[5]: []QueryIssue{
			NewLOFuntionsIssue("TRIGGER", "t_raster ON image", sqls[5], []string{"lo_manage"}),
		},
	}
	expectedSQLsWithIssues[sqls[0]] = modifiedIssuesforPLPGSQL(expectedSQLsWithIssues[sqls[0]], "FUNCTION", "manage_large_object")
	expectedSQLsWithIssues[sqls[1]] = modifiedIssuesforPLPGSQL(expectedSQLsWithIssues[sqls[1]], "FUNCTION", "import_file_to_table")
	expectedSQLsWithIssues[sqls[2]] = modifiedIssuesforPLPGSQL(expectedSQLsWithIssues[sqls[2]], "FUNCTION", "export_large_object")
	expectedSQLsWithIssues[sqls[3]] = modifiedIssuesforPLPGSQL(expectedSQLsWithIssues[sqls[3]], "PROCEDURE", "read_large_object")
	expectedSQLsWithIssues[sqls[4]] = modifiedIssuesforPLPGSQL(expectedSQLsWithIssues[sqls[4]], "FUNCTION", "write_to_large_object")

	parserIssueDetector := NewParserIssueDetector()

	for stmt, expectedIssues := range expectedSQLsWithIssues {
		issues, err := parserIssueDetector.GetAllIssues(stmt, ybversion.LatestStable)
		// NOTE(review): leftover debug print — consider t.Logf (and dropping the
		// then-unused fmt import) so `go test` output stays quiet on success.
		fmt.Printf("%v", issues)

		assert.NoError(t, err, "Error detecting issues for statement: %s", stmt)
		assert.Equal(t, len(expectedIssues), len(issues), "Mismatch in issue count for statement: %s", stmt)
		for _, expectedIssue := range expectedIssues {
			found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool {
				return cmp.Equal(expectedIssue, queryIssue)
			})
			assert.True(t, found, "Expected issue not found: %v in statement: %s", expectedIssue, stmt)
		}
	}
}

// currently, both FuncCallDetector and XmlExprDetector can detect XMLFunctionsIssue
// statement below has both XML functions and XML expressions.
// but we want to only return one XMLFunctionsIssue from parserIssueDetector.getDMLIssues
// and there is some workaround in place to avoid returning multiple issues in .genericIssues method
func TestSingleXMLIssueIsDetected(t *testing.T) {
	stmt := `
	SELECT e.id, x.employee_xml
	FROM employees e
	JOIN (
		SELECT xmlelement(name "employee", xmlattributes(e.id AS "id"), e.name) AS employee_xml
		FROM employees e
	) x ON x.employee_xml IS NOT NULL
	WHERE xmlexists('//employee[name="John Doe"]' PASSING BY REF x.employee_xml);`

	parserIssueDetector := NewParserIssueDetector()
	issues, err := parserIssueDetector.getDMLIssues(stmt)
	testutils.FatalIfError(t, err)
	assert.Equal(t, 1, len(issues))
}
// TestJsonUnsupportedFeatures verifies detection of SQL/JSON features that are
// unsupported: JSON constructor functions (JSON_OBJECT, JSON_ARRAY,
// JSON_ARRAYAGG, JSON_OBJECTAGG), JSON query functions (JSON_QUERY, JSON_VALUE,
// JSON_EXISTS, JSON_TABLE) and IS JSON predicates — in plain DML, in a
// materialized view, and in a table CHECK constraint.
// NOTE(review): sqls[6] and sqls[10] are identical strings, so they collapse to
// one map key below; both expect the same issue, so the test still holds.
func TestJsonUnsupportedFeatures(t *testing.T) {
	sqls := []string{
		`SELECT department, JSON_ARRAYAGG(name) AS employees_json
    FROM employees
    GROUP BY department;`,
		`INSERT INTO movies (details)
VALUES (
    JSON_OBJECT('title' VALUE 'Dune', 'director' VALUE 'Denis Villeneuve', 'year' VALUE 2021)
);`,
		`SELECT json_objectagg(k VALUE v) AS json_result
    FROM (VALUES ('a', 1), ('b', 2), ('c', 3)) AS t(k, v);`,
		`SELECT JSON_OBJECT(
    'movie' VALUE JSON_OBJECT('code' VALUE 'P123', 'title' VALUE 'Jaws'),
    'director' VALUE 'Steven Spielberg'
) AS nested_json_object;`,
		`select JSON_ARRAYAGG('[1, "2", null]');`,
		`SELECT JSON_OBJECT(
    'code' VALUE 'P123',
    'title' VALUE 'Jaws',
    'price' VALUE 19.99,
    'available' VALUE TRUE
) AS json_obj;`,
		`SELECT id, JSON_QUERY(details, '$.author') AS author
FROM books;`,
		`SELECT jt.* FROM
 my_films,
 JSON_TABLE (js, '$.favorites[*]' COLUMNS (
   id FOR ORDINALITY,
   kind text PATH '$.kind',
   title text PATH '$.films[*].title' WITH WRAPPER,
   director text PATH '$.films[*].director' WITH WRAPPER)) AS jt;`,
		`SELECT jt.* FROM
 my_films,
 JSON_TABLE (js, $1 COLUMNS (
   id FOR ORDINALITY,
   kind text PATH '$.kind',
   title text PATH '$.films[*].title' WITH WRAPPER,
   director text PATH '$.films[*].director' WITH WRAPPER)) AS jt;`,
		`SELECT id, details
FROM books
WHERE JSON_EXISTS(details, '$.author');`,
		`SELECT id, JSON_QUERY(details, '$.author') AS author
FROM books;`,
		`SELECT
    id,
    JSON_VALUE(details, '$.title') AS title,
    JSON_VALUE(details, '$.price')::NUMERIC AS price
FROM books;`,
		`SELECT id, JSON_VALUE(details, '$.title') AS title
FROM books
WHERE JSON_EXISTS(details, '$.price ? (@ > $price)' PASSING 30 AS price);`,
		`SELECT js, js IS JSON "json?", js IS JSON SCALAR "scalar?", js IS JSON OBJECT "object?", js IS JSON ARRAY "array?"
FROM (VALUES ('123'), ('"abc"'), ('{"a": "b"}'), ('[1,2]'),('abc')) foo(js);`,
		`SELECT js,
  js IS JSON OBJECT "object?",
  js IS JSON ARRAY "array?",
  js IS JSON ARRAY WITH UNIQUE KEYS "array w. UK?",
  js IS JSON ARRAY WITHOUT UNIQUE KEYS "array w/o UK?"
FROM (VALUES ('[{"a":"1"},
 {"b":"2","b":"3"}]')) foo(js);`,
		`SELECT js,
  js IS JSON OBJECT "object?"
  FROM (VALUES ('[{"a":"1"},
 {"b":"2","b":"3"}]')) foo(js); `,
		`CREATE MATERIALIZED VIEW public.test_jsonb_view AS
SELECT
    id,
    data->>'name' AS name,
    JSON_VALUE(data, '$.age' RETURNING INTEGER) AS age,
    JSON_EXISTS(data, '$.skills[*] ? (@ == "JSON")') AS knows_json,
    jt.skill
FROM public.test_jsonb,
JSON_TABLE(data, '$.skills[*]'
    COLUMNS (
        skill TEXT PATH '$'
    )
) AS jt;`,
		`SELECT JSON_ARRAY($1, 12, TRUE, $2) AS json_array;`,
		`CREATE TABLE sales.json_data (
    id int PRIMARY KEY,
    array_column TEXT CHECK (array_column IS JSON ARRAY),
    unique_keys_column TEXT CHECK (unique_keys_column IS JSON WITH UNIQUE KEYS)
);`,
	}
	sqlsWithExpectedIssues := map[string][]QueryIssue{
		sqls[0]: []QueryIssue{
			NewJsonConstructorFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[0], []string{JSON_ARRAYAGG}),
		},
		sqls[1]: []QueryIssue{
			NewJsonConstructorFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[1], []string{JSON_OBJECT}),
		},
		sqls[2]: []QueryIssue{
			NewJsonConstructorFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[2], []string{JSON_OBJECTAGG}),
		},
		sqls[3]: []QueryIssue{
			NewJsonConstructorFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[3], []string{JSON_OBJECT}),
		},
		sqls[4]: []QueryIssue{
			NewJsonConstructorFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[4], []string{JSON_ARRAYAGG}),
		},
		sqls[5]: []QueryIssue{
			NewJsonConstructorFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[5], []string{JSON_OBJECT}),
		},
		sqls[6]: []QueryIssue{
			NewJsonQueryFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[6], []string{JSON_QUERY}),
		},
		sqls[7]: []QueryIssue{
			NewJsonQueryFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[7], []string{JSON_TABLE}),
		},
		// sqls[8]: []QueryIssue{
		// 	NewJsonQueryFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[8]),
		//NOT REPORTED YET because of PARSER failing if JSON_TABLE has a parameterized values $1, $2 ...
		//https://github.com/pganalyze/pg_query_go/issues/127
		// },
		sqls[9]: []QueryIssue{
			NewJsonQueryFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[9], []string{JSON_EXISTS}),
		},
		sqls[10]: []QueryIssue{
			NewJsonQueryFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[10], []string{JSON_QUERY}),
		},
		sqls[11]: []QueryIssue{
			NewJsonQueryFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[11], []string{JSON_VALUE}),
		},
		sqls[12]: []QueryIssue{
			NewJsonQueryFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[12], []string{JSON_VALUE, JSON_EXISTS}),
		},
		sqls[13]: []QueryIssue{
			NewJsonPredicateIssue(DML_QUERY_OBJECT_TYPE, "", sqls[13]),
		},
		sqls[14]: []QueryIssue{
			NewJsonPredicateIssue(DML_QUERY_OBJECT_TYPE, "", sqls[14]),
		},
		sqls[15]: []QueryIssue{
			NewJsonPredicateIssue(DML_QUERY_OBJECT_TYPE, "", sqls[15]),
		},
		sqls[16]: []QueryIssue{
			NewJsonQueryFunctionIssue("MVIEW", "public.test_jsonb_view", sqls[16], []string{JSON_VALUE, JSON_EXISTS, JSON_TABLE}),
		},
		sqls[17]: []QueryIssue{
			NewJsonConstructorFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[17], []string{JSON_ARRAY}),
		},
		sqls[18]: []QueryIssue{
			NewJsonPredicateIssue("TABLE", "sales.json_data", sqls[18]),
		},
	}
	parserIssueDetector := NewParserIssueDetector()
	for stmt, expectedIssues := range sqlsWithExpectedIssues {
		issues, err := parserIssueDetector.GetAllIssues(stmt, ybversion.LatestStable)
		assert.NoError(t, err, "Error detecting issues for statement: %s", stmt)
		assert.Equal(t, len(expectedIssues), len(issues), "Mismatch in issue count for statement: %s", stmt)
		for _, expectedIssue := range expectedIssues {
			found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool {
				return cmp.Equal(expectedIssue, queryIssue)
			})
			assert.True(t, found, "Expected issue not found: %v in statement: %s", expectedIssue, stmt)
		}
	}
}
+);`, + `CREATE TABLE users ( + id SERIAL PRIMARY KEY, + name TEXT, + email TEXT, + active BOOLEAN +);`, + `CREATE TABLE test_json_chk ( + id int, + data1 jsonb, + CHECK (data1['key']<>'') +);`, + `CREATE OR REPLACE FUNCTION get_user_info(user_id INT) +RETURNS JSONB AS $$ +BEGIN + RETURN ( + SELECT jsonb_build_object( + 'id', id, + 'name', name, + 'email', email, + 'active', active + ) + FROM users + WHERE id = user_id + ); +END; +$$ LANGUAGE plpgsql;`, + } + sqls := []string{ + + `CREATE TABLE test_json_chk ( + id int, + data1 jsonb, + CHECK (data1['key']<>'') +);`, + `SELECT + data->>'name' AS name, + data['scores'][1] AS second_score +FROM test_jsonb1;`, + `SELECT ('[{"key": "value1"}, {"key": "value2"}]'::jsonb)[1]['key'] AS object_in_array; `, + `SELECT (JSON_OBJECT( + 'movie' VALUE JSON_OBJECT('code' VALUE 'P123', 'title' VALUE 'Jaws'), + 'director' VALUE 'Steven Spielberg' +)::JSONB)['movie'] AS nested_json_object;`, + `SELECT (jsonb_build_object('name', 'PostgreSQL', 'version', 14, 'open_source', TRUE))['name'] AS json_obj;`, + `SELECT ('{"key": "value1"}'::jsonb || '{"key": "value2"}'::jsonb)['key'] AS object_in_array;`, + `SELECT ('{"key": "value1"}'::jsonb || '{"key": "value2"}')['key'] AS object_in_array;`, + `SELECT (data || '{"new_key": "new_value"}' )['name'] FROM test_jsonb;`, + `SELECT (jsonb_build_object('name', 'PostgreSQL', 'version', 14, 'open_source', TRUE))['name'] AS json_obj;`, + `SELECT (jsonb_build_object('name', 'PostgreSQL', 'version', 14, 'open_source', TRUE) || '{"key": "value2"}')['name'] AS json_obj;`, + `SELECT (ROW('Alice', 'Smith', 25))['0'] ;`, + `SELECT (get_user_info(2))['name'] AS user_info;`, + } + + stmtsWithExpectedIssues := map[string][]QueryIssue{ + sqls[0]: []QueryIssue{ + NewJsonbSubscriptingIssue(TABLE_OBJECT_TYPE, "test_json_chk", sqls[0]), + }, + sqls[1]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", sqls[1]), + }, + sqls[2]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", 
sqls[2]), + }, + sqls[3]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", sqls[3]), + NewJsonConstructorFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[3], []string{JSON_OBJECT}), + }, + sqls[4]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", sqls[4]), + }, + sqls[5]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", sqls[5]), + }, + sqls[6]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", sqls[6]), + }, + sqls[7]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", sqls[7]), + }, + sqls[8]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", sqls[8]), + }, + sqls[9]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", sqls[9]), + }, + sqls[10]: []QueryIssue{}, + sqls[11]: []QueryIssue{ + NewJsonbSubscriptingIssue(DML_QUERY_OBJECT_TYPE, "", sqls[11]), + }, + } + + parserIssueDetector := NewParserIssueDetector() + for _, stmt := range ddlSqls { + err := parserIssueDetector.ParseRequiredDDLs(stmt) + assert.NoError(t, err, "Error parsing required ddl: %s", stmt) + } + for stmt, expectedIssues := range stmtsWithExpectedIssues { + issues, err := parserIssueDetector.GetAllIssues(stmt, ybversion.LatestStable) + assert.NoError(t, err, "Error detecting issues for statement: %s", stmt) + assert.Equal(t, len(expectedIssues), len(issues), "Mismatch in issue count for statement: %s", stmt) + for _, expectedIssue := range expectedIssues { + found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool { + return cmp.Equal(expectedIssue, queryIssue) + }) + assert.True(t, found, "Expected issue not found: %v in statement: %s", expectedIssue, stmt) + } + } +} +func TestAggregateFunctions(t *testing.T) { + sqls := []string{ + `SELECT + department, + any_value(employee_name) AS any_employee + FROM employees + GROUP BY department;`, + `SELECT range_intersect_agg(multi_event_range) AS intersection_of_multiranges +FROM multiranges;`, + `SELECT 
range_agg(multi_event_range) AS union_of_multiranges +FROM multiranges;`, + `CREATE OR REPLACE FUNCTION aggregate_ranges() +RETURNS INT4MULTIRANGE AS $$ +DECLARE + aggregated_range INT4MULTIRANGE; +BEGIN + SELECT range_agg(range_value) INTO aggregated_range FROM ranges; + SELECT + department, + any_value(employee_name) AS any_employee + FROM employees + GROUP BY department; + RETURN aggregated_range; +END; +$$ LANGUAGE plpgsql;`, + } + aggregateSqls := map[string][]QueryIssue{ + sqls[0]: []QueryIssue{ + NewAggregationFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[0], []string{"any_value"}), + }, + sqls[1]: []QueryIssue{ + NewAggregationFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[1], []string{"range_intersect_agg"}), + }, + sqls[2]: []QueryIssue{ + NewAggregationFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[2], []string{"range_agg"}), + }, + sqls[3]: []QueryIssue{ + NewAggregationFunctionIssue(DML_QUERY_OBJECT_TYPE, "", "SELECT range_agg(range_value) FROM ranges;", []string{"range_agg"}), + NewAggregationFunctionIssue(DML_QUERY_OBJECT_TYPE, "", sqls[0], []string{"any_value"}), + }, + } + aggregateSqls[sqls[3]] = modifiedIssuesforPLPGSQL(aggregateSqls[sqls[3]], "FUNCTION", "aggregate_ranges") + + parserIssueDetector := NewParserIssueDetector() + for stmt, expectedIssues := range aggregateSqls { + issues, err := parserIssueDetector.GetAllIssues(stmt, ybversion.LatestStable) + assert.NoError(t, err, "Error detecting issues for statement: %s", stmt) + assert.Equal(t, len(expectedIssues), len(issues), "Mismatch in issue count for statement: %s", stmt) + for _, expectedIssue := range expectedIssues { + found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool { + return cmp.Equal(expectedIssue, queryIssue) + }) + assert.True(t, found, "Expected issue not found: %v in statement: %s", expectedIssue, stmt) + } + } +} + +func TestRegexFunctionsIssue(t *testing.T) { + dmlStmts := []string{ + `SELECT regexp_count('This is an example. Another example. 
Example is a common word.', 'example')`, + `SELECT regexp_instr('This is an example. Another example. Example is a common word.', 'example')`, + `SELECT regexp_like('This is an example. Another example. Example is a common word.', 'example')`, + `SELECT regexp_count('abc','abc'), regexp_instr('abc','abc'), regexp_like('abc','abc')`, + } + + ddlStmts := []string{ + `CREATE TABLE x (id INT PRIMARY KEY, id2 INT DEFAULT regexp_count('This is an example. Another example. Example is a common word.', 'example'))`, + } + + parserIssueDetector := NewParserIssueDetector() + + for _, stmt := range dmlStmts { + issues, err := parserIssueDetector.getDMLIssues(stmt) + testutils.FatalIfError(t, err) + assert.Equal(t, 1, len(issues)) + assert.Equal(t, NewRegexFunctionsIssue(DML_QUERY_OBJECT_TYPE, "", stmt), issues[0]) + } + + for _, stmt := range ddlStmts { + issues, err := parserIssueDetector.getDDLIssues(stmt) + testutils.FatalIfError(t, err) + assert.Equal(t, 1, len(issues)) + assert.Equal(t, NewRegexFunctionsIssue(TABLE_OBJECT_TYPE, "x", stmt), issues[0]) + } + +} + +func TestFetchWithTiesInSelect(t *testing.T) { + + stmt1 := ` + SELECT * FROM employees + ORDER BY salary DESC + FETCH FIRST 2 ROWS WITH TIES;` + + // subquery + stmt2 := `SELECT * + FROM ( + SELECT * FROM employees + ORDER BY salary DESC + FETCH FIRST 2 ROWS WITH TIES + ) AS top_employees;` + + stmt3 := `CREATE VIEW top_employees_view AS + SELECT * + FROM ( + SELECT * FROM employees + ORDER BY salary DESC + FETCH FIRST 2 ROWS WITH TIES + ) AS top_employees;` + + expectedIssues := map[string][]QueryIssue{ + stmt1: []QueryIssue{NewFetchWithTiesIssue("DML_QUERY", "", stmt1)}, + stmt2: []QueryIssue{NewFetchWithTiesIssue("DML_QUERY", "", stmt2)}, + } + expectedDDLIssues := map[string][]QueryIssue{ + stmt3: []QueryIssue{NewFetchWithTiesIssue("VIEW", "top_employees_view", stmt3)}, + } + + parserIssueDetector := NewParserIssueDetector() + + for stmt, expectedIssues := range expectedIssues { + issues, err := 
parserIssueDetector.GetDMLIssues(stmt, ybversion.LatestStable) + + assert.NoError(t, err, "Error detecting issues for statement: %s", stmt) + + assert.Equal(t, len(expectedIssues), len(issues), "Mismatch in issue count for statement: %s", stmt) + for _, expectedIssue := range expectedIssues { + found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool { + return cmp.Equal(expectedIssue, queryIssue) + }) + assert.True(t, found, "Expected issue not found: %v in statement: %s", expectedIssue, stmt) + } + } + + for stmt, expectedIssues := range expectedDDLIssues { + issues, err := parserIssueDetector.GetDDLIssues(stmt, ybversion.LatestStable) + + assert.NoError(t, err, "Error detecting issues for statement: %s", stmt) + + assert.Equal(t, len(expectedIssues), len(issues), "Mismatch in issue count for statement: %s", stmt) + for _, expectedIssue := range expectedIssues { + found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool { + return cmp.Equal(expectedIssue, queryIssue) + }) + assert.True(t, found, "Expected issue not found: %v in statement: %s", expectedIssue, stmt) + } + } +} + +func TestCopyUnsupportedConstructIssuesDetected(t *testing.T) { + expectedIssues := map[string][]QueryIssue{ + `COPY my_table FROM '/path/to/data.csv' WHERE col1 > 100;`: {NewCopyFromWhereIssue("DML_QUERY", "", `COPY my_table FROM '/path/to/data.csv' WHERE col1 > 100;`)}, + `COPY my_table(col1, col2) FROM '/path/to/data.csv' WHERE col2 = 'test';`: {NewCopyFromWhereIssue("DML_QUERY", "", `COPY my_table(col1, col2) FROM '/path/to/data.csv' WHERE col2 = 'test';`)}, + `COPY my_table FROM '/path/to/data.csv' WHERE TRUE;`: {NewCopyFromWhereIssue("DML_QUERY", "", `COPY my_table FROM '/path/to/data.csv' WHERE TRUE;`)}, + `COPY employees (id, name, age) + FROM STDIN WITH (FORMAT csv) + WHERE age > 30;`: {NewCopyFromWhereIssue("DML_QUERY", "", `COPY employees (id, name, age) + FROM STDIN WITH (FORMAT csv) + WHERE age > 30;`)}, + + `COPY table_name (name, age) FROM 
'/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR IGNORE);`: {NewCopyOnErrorIssue("DML_QUERY", "", `COPY table_name (name, age) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR IGNORE);`)}, + `COPY table_name (name, age) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR STOP);`: {NewCopyOnErrorIssue("DML_QUERY", "", `COPY table_name (name, age) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR STOP);`)}, + + `COPY table_name (name, age) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR IGNORE) WHERE age > 18;`: {NewCopyFromWhereIssue("DML_QUERY", "", `COPY table_name (name, age) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR IGNORE) WHERE age > 18;`), NewCopyOnErrorIssue("DML_QUERY", "", `COPY table_name (name, age) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR IGNORE) WHERE age > 18;`)}, + `COPY table_name (name, age) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR STOP) WHERE name = 'Alice';`: {NewCopyFromWhereIssue("DML_QUERY", "", `COPY table_name (name, age) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR STOP) WHERE name = 'Alice';`), NewCopyOnErrorIssue("DML_QUERY", "", `COPY table_name (name, age) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true, ON_ERROR STOP) WHERE name = 'Alice';`)}, + + `COPY my_table FROM '/path/to/data.csv' WITH (FORMAT csv);`: {}, + `COPY my_table FROM '/path/to/data.csv' WITH (FORMAT text);`: {}, + `COPY my_table FROM '/path/to/data.csv';`: {}, + `COPY my_table FROM '/path/to/data.csv' WITH (DELIMITER ',');`: {}, + `COPY my_table(col1, col2) FROM '/path/to/data.csv' WITH (FORMAT csv, HEADER true);`: {}, + } + + parserIssueDetector := NewParserIssueDetector() + + for stmt, expectedIssues := range expectedIssues { + issues, err := parserIssueDetector.getDMLIssues(stmt) + testutils.FatalIfError(t, err) + assert.Equal(t, len(expectedIssues), len(issues)) + + for _, expectedIssue := range 
expectedIssues { + found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool { + return cmp.Equal(expectedIssue, queryIssue) + }) + assert.True(t, found, "Expected issue not found: %v in statement: %s", expectedIssue, stmt) + } + } +} + +func TestForeignKeyReferencesPartitionedTableIssues(t *testing.T) { + requiredDDLs := []string{ + `CREATE TABLE abc1(id int PRIMARY KEY, val text) PARTITION BY RANGE (id);`, + `CREATE TABLE schema1.abc(id int PRIMARY KEY, val text) PARTITION BY RANGE (id);`, + } + stmt1 := `CREATE TABLE abc_fk(id int PRIMARY KEY, abc_id INT REFERENCES abc1(id), val text) ;` + stmt2 := `ALTER TABLE schema1.abc_fk1 +ADD CONSTRAINT fk FOREIGN KEY (abc1_id) +REFERENCES schema1.abc (id); +` + stmt3 := `CREATE TABLE abc_fk ( + id INT PRIMARY KEY, + abc_id INT, + val TEXT, + CONSTRAINT fk_abc FOREIGN KEY (abc_id) REFERENCES abc1(id) +); +` + + stmt4 := `CREATE TABLE schema1.abc_fk(id int PRIMARY KEY, abc_id INT, val text, FOREIGN KEY (abc_id) REFERENCES schema1.abc(id));` + + ddlStmtsWithIssues := map[string][]QueryIssue{ + stmt1: []QueryIssue{ + NewForeignKeyReferencesPartitionedTableIssue(TABLE_OBJECT_TYPE, "abc_fk", stmt1, "abc_fk_abc_id_fkey"), + }, + stmt2: []QueryIssue{ + NewForeignKeyReferencesPartitionedTableIssue(TABLE_OBJECT_TYPE, "schema1.abc_fk1", stmt2, "fk"), + }, + stmt3: []QueryIssue{ + NewForeignKeyReferencesPartitionedTableIssue(TABLE_OBJECT_TYPE, "abc_fk", stmt3, "fk_abc"), + }, + stmt4: []QueryIssue{ + NewForeignKeyReferencesPartitionedTableIssue(TABLE_OBJECT_TYPE, "schema1.abc_fk", stmt4, "abc_fk_abc_id_fkey"), + }, + } + parserIssueDetector := NewParserIssueDetector() + for _, stmt := range requiredDDLs { + err := parserIssueDetector.ParseRequiredDDLs(stmt) + assert.NoError(t, err, "Error parsing required ddl: %s", stmt) + } + for stmt, expectedIssues := range ddlStmtsWithIssues { + issues, err := parserIssueDetector.GetDDLIssues(stmt, ybversion.LatestStable) + assert.NoError(t, err, "Error detecting issues for statement: 
%s", stmt) + + assert.Equal(t, len(expectedIssues), len(issues), "Mismatch in issue count for statement: %s", stmt) + for _, expectedIssue := range expectedIssues { + found := slices.ContainsFunc(issues, func(queryIssue QueryIssue) bool { + return cmp.Equal(expectedIssue, queryIssue) + }) + assert.True(t, found, "Expected issue not found: %v in statement: %s", expectedIssue, stmt) + } + } +} diff --git a/yb-voyager/src/query/queryissue/query_issue.go b/yb-voyager/src/query/queryissue/query_issue.go new file mode 100644 index 0000000000..5359879f11 --- /dev/null +++ b/yb-voyager/src/query/queryissue/query_issue.go @@ -0,0 +1,42 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This package has all logic related to detecting issues in queries (DDL or DML). +Entry point is ParserIssueDetector, which makes use of the queryparser pkg to parse +the query and multiple detectors to figure out issues in the parseTree. +*/ +package queryissue + +import "github.com/yugabyte/yb-voyager/yb-voyager/src/issue" + +type QueryIssue struct { + issue.Issue + ObjectType string // TABLE, FUNCTION, DML_QUERY? 
+ ObjectName string // table name/function name/etc + SqlStatement string + Details map[string]interface{} // additional details about the issue +} + +func newQueryIssue(issue issue.Issue, objectType string, objectName string, sqlStatement string, details map[string]interface{}) QueryIssue { + return QueryIssue{ + Issue: issue, + ObjectType: objectType, + ObjectName: objectName, + SqlStatement: sqlStatement, + Details: details, + } +} diff --git a/yb-voyager/src/query/queryparser/ddl_processor.go b/yb-voyager/src/query/queryparser/ddl_processor.go new file mode 100644 index 0000000000..895d6f518f --- /dev/null +++ b/yb-voyager/src/query/queryparser/ddl_processor.go @@ -0,0 +1,1139 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package queryparser + +import ( + "fmt" + "slices" + "strings" + + pg_query "github.com/pganalyze/pg_query_go/v6" + "github.com/samber/lo" + log "github.com/sirupsen/logrus" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" +) + +// Base parser interface +/* +Whenever adding a new DDL type to parsing for detecting issues, you need to extend this DDLProcessor +with a Process() function that adds logic to get the required information out from the parseTree of that DDL +and store it in a DDLObject struct +*/ +type DDLProcessor interface { + Process(*pg_query.ParseResult) (DDLObject, error) +} + +// Base DDL object interface +/* +Whenever adding a new DDL type, you need to extend this DDLObject interface for that object type +with a struct storing the required information, which should also implement these required functions for the object name and schema name +*/ +type DDLObject interface { + GetObjectName() string + GetObjectType() string + GetSchemaName() string +} + +//=========== TABLE PROCESSOR ================================ + +// TableProcessor handles parsing CREATE TABLE statements +type TableProcessor struct{} + +func NewTableProcessor() *TableProcessor { + return &TableProcessor{} +} + +/* +e.g. 
CREATE TABLE "Test"( + + id int, + room_id int, + time_range tsrange, + room_id1 int, + time_range1 tsrange + EXCLUDE USING gist (room_id WITH =, time_range WITH &&), + EXCLUDE USING gist (room_id1 WITH =, time_range1 WITH &&) + ); + +create_stmt:{relation:{relname:"Test" inh:true relpersistence:"p" location:14} table_elts:...table_elts:{constraint:{contype:CONSTR_EXCLUSION +location:226 exclusions:{list:{items:{index_elem:{name:"room_id" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} +items:{list:{items:{string:{sval:"="}}}}}} exclusions:{list:{items:{index_elem:{name:"time_range" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} +items:{list:{items:{string:{sval:"&&"}}}}}} access_method:"gist"}} table_elts:{constraint:{contype:CONSTR_EXCLUSION location:282 exclusions:{list: +{items:{index_elem:{name:"room_id1" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} items:{list:{items:{string:{sval:"="}}}}}} +exclusions:{list:{items:{index_elem:{name:"time_range1" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} items:{list:{items:{string:{sval:"&&"}}}}}} +access_method:"gist"}} oncommit:ONCOMMIT_NOOP}} stmt_len:365} + +here we are iterating over all the table_elts - table elements and which are comma separated column info in +the DDL so each column has column_def(column definition) in the parse tree but in case it is a constraint, the column_def +is nil. + +e.g. 
In case if PRIMARY KEY is included in column definition + + CREATE TABLE example2 ( + id numeric NOT NULL PRIMARY KEY, + country_code varchar(3), + record_type varchar(5) + +) PARTITION BY RANGE (country_code, record_type) ; +stmts:{stmt:{create_stmt:{relation:{relname:"example2" inh:true relpersistence:"p" location:193} table_elts:{column_def:{colname:"id" +type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"numeric"}} typemod:-1 location:208} is_local:true +constraints:{constraint:{contype:CONSTR_NOTNULL location:216}} constraints:{constraint:{contype:CONSTR_PRIMARY location:225}} +location:205}} ... partspec:{strategy:PARTITION_STRATEGY_RANGE +part_params:{partition_elem:{name:"country_code" location:310}} part_params:{partition_elem:{name:"record_type" location:324}} +location:290} oncommit:ONCOMMIT_NOOP}} stmt_location:178 stmt_len:159} + +In case if PRIMARY KEY in column list CREATE TABLE example1 (..., PRIMARY KEY(id,country_code) ) PARTITION BY RANGE (country_code, record_type); +stmts:{stmt:{create_stmt:{relation:{relname:"example1" inh:true relpersistence:"p" location:15} table_elts:{column_def:{colname:"id" +type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"numeric"}} ... table_elts:{constraint:{contype:CONSTR_PRIMARY +location:98 keys:{string:{sval:"id"}} keys:{string:{sval:"country_code"}}}} partspec:{strategy:PARTITION_STRATEGY_RANGE +part_params:{partition_elem:{name:"country_code" location:150}} part_params:{partition_elem:{name:"record_type" ... 
+*/ +func (tableProcessor *TableProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) { + createTableNode, ok := getCreateTableStmtNode(parseTree) + if !ok { + return nil, fmt.Errorf("not a CREATE TABLE statement") + } + + table := &Table{ + SchemaName: createTableNode.CreateStmt.Relation.Schemaname, + TableName: createTableNode.CreateStmt.Relation.Relname, + /* + e.g CREATE UNLOGGED TABLE tbl_unlogged (id int, val text); + stmt:{create_stmt:{relation:{schemaname:"public" relname:"tbl_unlogged" inh:true relpersistence:"u" location:19} + */ + IsUnlogged: createTableNode.CreateStmt.Relation.GetRelpersistence() == "u", + IsPartitioned: createTableNode.CreateStmt.GetPartspec() != nil, + IsInherited: tableProcessor.checkInheritance(createTableNode), + GeneratedColumns: make([]string, 0), + Constraints: make([]TableConstraint, 0), + PartitionColumns: make([]string, 0), + } + + // Parse columns and their properties + tableProcessor.parseTableElts(createTableNode.CreateStmt.TableElts, table) + + if table.IsPartitioned { + + partitionElements := createTableNode.CreateStmt.GetPartspec().GetPartParams() + table.PartitionStrategy = createTableNode.CreateStmt.GetPartspec().GetStrategy() + + for _, partElem := range partitionElements { + if partElem.GetPartitionElem().GetExpr() != nil { + table.IsExpressionPartition = true + } else { + table.PartitionColumns = append(table.PartitionColumns, partElem.GetPartitionElem().GetName()) + } + } + } + + return table, nil +} + +func (tableProcessor *TableProcessor) parseTableElts(tableElts []*pg_query.Node, table *Table) { + /* + Parsing the table elements like column definitions and constraints, basically all the things inside the () of CREATE TABLE test(id int, CONSTRAINT Pk PRIMARY KEY (id)....); + storing all the information of columns - name, typename, isArraytype and constraints - constraint name, columns involved, type of constraint, is deferrable or not + + */ + for _, element := range tableElts { + if 
element.GetColumnDef() != nil { + if tableProcessor.isGeneratedColumn(element.GetColumnDef()) { + table.GeneratedColumns = append(table.GeneratedColumns, element.GetColumnDef().Colname) + } + colName := element.GetColumnDef().GetColname() + + typeNames := element.GetColumnDef().GetTypeName().GetNames() + typeSchemaName, typeName := getSchemaAndObjectName(typeNames) + /* + e.g. CREATE TABLE test_xml_type(id int, data xml); + relation:{relname:"test_xml_type" inh:true relpersistence:"p" location:15} table_elts:{column_def:{colname:"id" + type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"int4"}} typemod:-1 location:32} + is_local:true location:29}} table_elts:{column_def:{colname:"data" type_name:{names:{string:{sval:"xml"}} + typemod:-1 location:42} is_local:true location:37}} oncommit:ONCOMMIT_NOOP}} + + here checking the type of each column as type definition can be a list names for types which are native e.g. int + it has type names - [pg_catalog, int4] both to determine but for complex types like text,json or xml etc. if doesn't have + info about pg_catalog. so checking the 0th only in case XML/XID to determine the type and report + */ + table.Columns = append(table.Columns, TableColumn{ + ColumnName: colName, + TypeName: typeName, + TypeSchema: typeSchemaName, + IsArrayType: isArrayType(element.GetColumnDef().GetTypeName()), + }) + + constraints := element.GetColumnDef().GetConstraints() + if constraints != nil { + for idx, c := range constraints { + constraint := c.GetConstraint() + if slices.Contains(deferrableConstraintsList, constraint.Contype) { + /* + e.g. 
create table unique_def_test(id int UNIQUE DEFERRABLE, c1 int); + + create_stmt:{relation:{relname:"unique_def_test" inh:true relpersistence:"p" location:15} + table_elts:{column_def:{colname:"id" type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"int4"}} + typemod:-1 location:34} is_local:true constraints:{constraint:{contype:CONSTR_UNIQUE location:38}} + constraints:{constraint:{contype:CONSTR_ATTR_DEFERRABLE location:45}} location:31}} .... + + here checking the case where this clause is in column definition so iterating over each column_def and in that + constraint type has deferrable or not and also it should not be a foreign constraint as Deferrable on FKs are + supported. + */ + if idx > 0 { + lastConstraint := table.Constraints[len(table.Constraints)-1] + lastConstraint.IsDeferrable = true + table.Constraints[len(table.Constraints)-1] = lastConstraint + } + } else { + /* + table_elts:{column_def:{colname:"abc_id" type_name:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"int4"}} typemod:-1 + location:45} is_local:true constraints:{constraint:{contype:CONSTR_FOREIGN initially_valid:true pktable:{schemaname:"schema1" + relname:"abc" inh:true relpersistence:"p" location:60} pk_attrs:{string:{sval:"id"}} fk_matchtype:"s" fk_upd_action:"a" fk_del_action:"a" + + In case of FKs there is field called PkTable which has reference table information + */ + table.addConstraint(constraint.Contype, []string{colName}, constraint.Conname, false, constraint.Pktable) + } + } + } + + } else if element.GetConstraint() != nil { + /* + e.g. create table uniquen_def_test1(id int, c1 int, UNIQUE(id) DEFERRABLE INITIALLY DEFERRED); + {create_stmt:{relation:{relname:"unique_def_test1" inh:true relpersistence:"p" location:80} table_elts:{column_def:{colname:"id" + type_name:{.... 
names:{string:{sval:"int4"}} typemod:-1 location:108} is_local:true location:105}} + table_elts:{constraint:{contype:CONSTR_UNIQUE deferrable:true initdeferred:true location:113 keys:{string:{sval:"id"}}}} .. + + here checking the case where this constraint is at the end as a constraint only, so checking deferrable field in constraint + in case it's not a FK. + */ + constraint := element.GetConstraint() + conType := element.GetConstraint().Contype + columns := parseColumnsFromKeys(constraint.GetKeys()) + switch conType { + case EXCLUSION_CONSTR_TYPE: + //In case CREATE DDL has EXCLUDE USING gist(room_id '=', time_range WITH &&) - it will be included in columns but won't have columnDef as its a constraint + exclusions := constraint.GetExclusions() + //exclusions:{list:{items:{index_elem:{name:"room_id" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} + //items:{list:{items:{string:{sval:"="}}}}}} + columns = tableProcessor.parseColumnsFromExclusions(exclusions) + case FOREIGN_CONSTR_TYPE: + // In case of Foreign key constraint if it is present at the end of table column definition + fkAttrs := constraint.FkAttrs + //CREATE TABLE schema1.abc_fk(id int, abc_id INT, val text, PRIMARY KEY(id), FOREIGN KEY (abc_id) REFERENCES schema1.abc(id)); + //table_elts:{constraint:{contype:CONSTR_FOREIGN initially_valid:true pktable:{schemaname:"schema1" relname:"abc" inh:true relpersistence:"p" location:109} + //fk_attrs:{string:{sval:"abc_id"}} pk_attrs:{string:{sval:"id"}} + columns = parseColumnsFromKeys(fkAttrs) + } + table.addConstraint(conType, columns, constraint.Conname, constraint.Deferrable, constraint.Pktable) + + } + } +} + +func (tableProcessor *TableProcessor) checkInheritance(createTableNode *pg_query.Node_CreateStmt) bool { + /* + CREATE TABLE Test(id int, name text) inherits(test_parent); + stmts:{stmt:{create_stmt:{relation:{relname:"test" inh:true relpersistence:"p" location:13} table_elts:{column_def:{colname:"id" .... 
+ inh_relations:{range_var:{relname:"test_parent" inh:true relpersistence:"p" location:46}} oncommit:ONCOMMIT_NOOP}} stmt_len:58} + + CREATE TABLE accounts_list_partitioned_p_northwest PARTITION OF accounts_list_partitioned FOR VALUES IN ('OR', 'WA'); + version:160001 stmts:{stmt:{create_stmt:{relation:{relname:"accounts_list_partitioned_p_northwest" inh:true relpersistence:"p" location:14} + inh_relations:{range_var:{relname:"accounts_list_partitioned" inh:true relpersistence:"p" location:65}} partbound:{strategy:"l" listdatums:{a_const:{sval:{sval:"OR"} location:106}} + listdatums:{a_const:{sval:{sval:"WA"} location:112}} location:102} oncommit:ONCOMMIT_NOOP}} + */ + inheritsRel := createTableNode.CreateStmt.GetInhRelations() + if inheritsRel != nil { + isPartitionOf := createTableNode.CreateStmt.GetPartbound() != nil + return !isPartitionOf + } + return false +} + +func (tableProcessor *TableProcessor) parseColumnsFromExclusions(list []*pg_query.Node) []string { + var res []string + for _, k := range list { + res = append(res, k.GetList().GetItems()[0].GetIndexElem().Name) // every first element of items in exclusions will be col name + } + return res +} + +func parseColumnsFromKeys(keys []*pg_query.Node) []string { + var res []string + for _, k := range keys { + res = append(res, k.GetString_().Sval) + } + return res + +} + +func (tableProcessor *TableProcessor) isGeneratedColumn(colDef *pg_query.ColumnDef) bool { + for _, constraint := range colDef.Constraints { + if constraint.GetConstraint().Contype == pg_query.ConstrType_CONSTR_GENERATED { + return true + } + } + return false +} + +type Table struct { + SchemaName string + TableName string + IsUnlogged bool + IsInherited bool + IsPartitioned bool + Columns []TableColumn + IsExpressionPartition bool + PartitionStrategy pg_query.PartitionStrategy + PartitionColumns []string + GeneratedColumns []string + Constraints []TableConstraint +} + +type TableColumn struct { + ColumnName string + TypeName string + 
TypeSchema string + IsArrayType bool +} + +func (tc *TableColumn) GetFullTypeName() string { + return utils.BuildObjectName(tc.TypeSchema, tc.TypeName) +} + +type TableConstraint struct { + ConstraintType pg_query.ConstrType + ConstraintName string + IsDeferrable bool + ReferencedTable string + Columns []string +} + +func (c *TableConstraint) IsPrimaryKeyORUniqueConstraint() bool { + return c.ConstraintType == PRIMARY_CONSTR_TYPE || c.ConstraintType == UNIQUE_CONSTR_TYPE +} + +func (c *TableConstraint) generateConstraintName(tableName string) string { + suffix := "" + //Deferrable is only applicable to following constraint + //https://www.postgresql.org/docs/current/sql-createtable.html#:~:text=Currently%2C%20only%20UNIQUE%2C%20PRIMARY%20KEY%2C%20EXCLUDE%2C%20and%20REFERENCES + switch c.ConstraintType { + case pg_query.ConstrType_CONSTR_UNIQUE: + suffix = "_key" + case pg_query.ConstrType_CONSTR_PRIMARY: + suffix = "_pkey" + case pg_query.ConstrType_CONSTR_EXCLUSION: + suffix = "_excl" + case pg_query.ConstrType_CONSTR_FOREIGN: + suffix = "_fkey" + } + + return fmt.Sprintf("%s_%s%s", tableName, strings.Join(c.Columns, "_"), suffix) +} + +func (t *Table) GetObjectName() string { + return utils.BuildObjectName(t.SchemaName, t.TableName) +} +func (t *Table) GetSchemaName() string { return t.SchemaName } + +func (t *Table) GetObjectType() string { return TABLE_OBJECT_TYPE } + +func (t *Table) PrimaryKeyColumns() []string { + for _, c := range t.Constraints { + if c.ConstraintType == PRIMARY_CONSTR_TYPE { + return c.Columns + } + } + return []string{} +} + +func (t *Table) UniqueKeyColumns() []string { + uniqueCols := make([]string, 0) + for _, c := range t.Constraints { + if c.ConstraintType == UNIQUE_CONSTR_TYPE { + uniqueCols = append(uniqueCols, c.Columns...) 
+ } + } + return uniqueCols +} + +func (t *Table) addConstraint(conType pg_query.ConstrType, columns []string, specifiedConName string, deferrable bool, referencedTable *pg_query.RangeVar) { + tc := TableConstraint{ + ConstraintType: conType, + Columns: columns, + IsDeferrable: deferrable, + } + generatedConName := tc.generateConstraintName(t.TableName) + conName := lo.Ternary(specifiedConName == "", generatedConName, specifiedConName) + tc.ConstraintName = conName + if conType == FOREIGN_CONSTR_TYPE { + tc.ReferencedTable = utils.BuildObjectName(referencedTable.Schemaname, referencedTable.Relname) + } + t.Constraints = append(t.Constraints, tc) +} + +//===========FOREIGN TABLE PROCESSOR ================================ + +type ForeignTableProcessor struct{} + +func NewForeignTableProcessor() *ForeignTableProcessor { + return &ForeignTableProcessor{} +} + +func (ftProcessor *ForeignTableProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) { + foreignTableNode, ok := getForeignTableStmtNode(parseTree) + if !ok { + return nil, fmt.Errorf("not a CREATE FOREIGN TABLE statement") + } + baseStmt := foreignTableNode.CreateForeignTableStmt.BaseStmt + relation := baseStmt.Relation + table := Table{ + TableName: relation.GetRelname(), + SchemaName: relation.GetSchemaname(), + //Not populating rest info + } + for _, element := range baseStmt.TableElts { + if element.GetColumnDef() != nil { + colName := element.GetColumnDef().GetColname() + + typeNames := element.GetColumnDef().GetTypeName().GetNames() + typeSchemaName, typeName := getSchemaAndObjectName(typeNames) + table.Columns = append(table.Columns, TableColumn{ + ColumnName: colName, + TypeName: typeName, + TypeSchema: typeSchemaName, + IsArrayType: isArrayType(element.GetColumnDef().GetTypeName()), + }) + } + } + return &ForeignTable{ + Table: table, + ServerName: foreignTableNode.CreateForeignTableStmt.GetServername(), + }, nil + +} + +type ForeignTable struct { + Table + ServerName string +} + +func 
(f *ForeignTable) GetObjectName() string { + return utils.BuildObjectName(f.SchemaName, f.TableName) +} +func (f *ForeignTable) GetSchemaName() string { return f.SchemaName } + +func (t *ForeignTable) GetObjectType() string { return FOREIGN_TABLE_OBJECT_TYPE } + +//===========INDEX PROCESSOR ================================ + +// IndexProcessor handles parsing CREATE INDEX statements +type IndexProcessor struct{} + +func NewIndexProcessor() *IndexProcessor { + return &IndexProcessor{} +} + +func (indexProcessor *IndexProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) { + indexNode, ok := getCreateIndexStmtNode(parseTree) + if !ok { + return nil, fmt.Errorf("not a CREATE INDEX statement") + } + + index := &Index{ + SchemaName: indexNode.IndexStmt.Relation.Schemaname, + IndexName: indexNode.IndexStmt.Idxname, + TableName: indexNode.IndexStmt.Relation.Relname, + AccessMethod: indexNode.IndexStmt.AccessMethod, + /* + e.g. CREATE INDEX idx on table_name(id) with (fillfactor='70'); + index_stmt:{idxname:"idx" relation:{relname:"table_name" inh:true relpersistence:"p" location:21} access_method:"btree" + index_params:{index_elem:{name:"id" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} + options:{def_elem:{defname:"fillfactor" arg:{string:{sval:"70"}} ... + here again similar to ALTER table Storage parameters options is the high level field in for WITH options. + */ + NumStorageOptions: len(indexNode.IndexStmt.GetOptions()), + Params: indexProcessor.parseIndexParams(indexNode.IndexStmt.IndexParams), + } + + return index, nil +} + +func (indexProcessor *IndexProcessor) parseIndexParams(params []*pg_query.Node) []IndexParam { + /* + e.g. + 1. 
		CREATE INDEX tsvector_idx ON public.documents (title_tsvector, id);
		stmt:{index_stmt:{idxname:"tsvector_idx" relation:{schemaname:"public" relname:"documents" inh:true relpersistence:"p" location:510} access_method:"btree"
		index_params:{index_elem:{name:"title_tsvector" ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}} index_params:{index_elem:{name:"id"
		ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}}}} stmt_location:479 stmt_len:69

		2. CREATE INDEX idx_json ON public.test_json ((data::jsonb));
		stmt:{index_stmt:{idxname:"idx_json" relation:{schemaname:"public" relname:"test_json" inh:true relpersistence:"p" location:703} access_method:"btree"
		index_params:{index_elem:{expr:{type_cast:{arg:{column_ref:{fields:{string:{sval:"data"}} location:722}} type_name:{names:{string:{sval:"jsonb"}} typemod:-1
		location:728} location:726}} ordering:SORTBY_DEFAULT nulls_ordering:SORTBY_NULLS_DEFAULT}}}} stmt_location:676 stmt_len:59
	*/
	var indexParams []IndexParam
	for _, i := range params {
		ip := IndexParam{
			SortByOrder:  i.GetIndexElem().Ordering,
			ColName:      i.GetIndexElem().GetName(),
			IsExpression: i.GetIndexElem().GetExpr() != nil,
		}
		if ip.IsExpression {
			//For the expression index case to report in case casting to unsupported types #3
			typeNames := i.GetIndexElem().GetExpr().GetTypeCast().GetTypeName().GetNames()
			ip.ExprCastTypeSchema, ip.ExprCastTypeName = getSchemaAndObjectName(typeNames)
			ip.IsExprCastArrayType = isArrayType(i.GetIndexElem().GetExpr().GetTypeCast().GetTypeName())
		}
		indexParams = append(indexParams, ip)
	}
	return indexParams
}

// Index is the parsed representation of a CREATE INDEX statement.
type Index struct {
	SchemaName        string
	IndexName         string
	TableName         string
	AccessMethod      string
	NumStorageOptions int
	Params            []IndexParam
}

// IndexParam describes one index key: either a plain column or an expression.
type IndexParam struct {
	SortByOrder         pg_query.SortByDir
	ColName             string
	IsExpression        bool
	ExprCastTypeName    string //In case of expression and casting to a type
	ExprCastTypeSchema  string //In case of expression and casting to a type
	IsExprCastArrayType bool
	//Add more fields
}

// GetFullExprCastTypeName returns the (possibly schema-qualified) type name the
// expression key is cast to.
func (indexParam *IndexParam) GetFullExprCastTypeName() string {
	return utils.BuildObjectName(indexParam.ExprCastTypeSchema, indexParam.ExprCastTypeName)
}

func (i *Index) GetObjectName() string {
	return fmt.Sprintf("%s ON %s", i.IndexName, i.GetTableName())
}
func (i *Index) GetSchemaName() string { return i.SchemaName }

func (i *Index) GetTableName() string {
	return utils.BuildObjectName(i.SchemaName, i.TableName)
}

func (i *Index) GetObjectType() string { return INDEX_OBJECT_TYPE }

//===========ALTER TABLE PROCESSOR ================================

// AlterTableProcessor handles parsing ALTER TABLE statements
type AlterTableProcessor struct{}

func NewAlterTableProcessor() *AlterTableProcessor {
	return &AlterTableProcessor{}
}

// Process extracts the alter sub-command and its details from an ALTER TABLE
// statement. Note: only the first command (Cmds[0]) is inspected.
func (atProcessor *AlterTableProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) {
	alterNode, ok := getAlterStmtNode(parseTree)
	if !ok {
		return nil, fmt.Errorf("not an ALTER TABLE statement")
	}

	alter := &AlterTable{
		SchemaName: alterNode.AlterTableStmt.Relation.Schemaname,
		TableName:  alterNode.AlterTableStmt.Relation.Relname,
		AlterType:  alterNode.AlterTableStmt.Cmds[0].GetAlterTableCmd().GetSubtype(),
	}

	// Parse specific alter command
	cmd := alterNode.AlterTableStmt.Cmds[0].GetAlterTableCmd()
	switch alter.AlterType {
	case pg_query.AlterTableType_AT_SetOptions:
		/*
			e.g. alter table test_1 alter column col1 set (attribute_option=value);
			cmds:{alter_table_cmd:{subtype:AT_SetOptions name:"col1" def:{list:{items:{def_elem:{defname:"attribute_option"
			arg:{type_name:{names:{string:{sval:"value"}} typemod:-1 location:263}} defaction:DEFELEM_UNSPEC location:246}}}}...

			for the set-attribute issue we will check the type of alter setting the options, and in the 'def' definition field which has the
			information of the type, we will check if there is any list, which will only be present in case there is syntax like (...)
		*/
		alter.NumSetAttributes = len(cmd.GetDef().GetList().GetItems())
	case pg_query.AlterTableType_AT_AddConstraint:
		alter.NumStorageOptions = len(cmd.GetDef().GetConstraint().GetOptions())
		/*
			e.g.
			ALTER TABLE example2
			ADD CONSTRAINT example2_pkey PRIMARY KEY (id);
			stmts:{stmt:{alter_table_stmt:{relation:{relname:"example2" inh:true relpersistence:"p" location:693}
			cmds:{alter_table_cmd:{subtype:AT_AddConstraint def:{constraint:{contype:CONSTR_PRIMARY conname:"example2_pkey"
			location:710 keys:{string:{sval:"id"}}}} behavior:DROP_RESTRICT}} objtype:OBJECT_TABLE}} stmt_location:679 stmt_len:72}

			e.g. ALTER TABLE ONLY public.meeting ADD CONSTRAINT no_time_overlap EXCLUDE USING gist (room_id WITH =, time_range WITH &&);
			cmds:{alter_table_cmd:{subtype:AT_AddConstraint def:{constraint:{contype:CONSTR_EXCLUSION conname:"no_time_overlap" location:41
			here again same checking the definition of the alter stmt if it has constraint and checking its type

			e.g. ALTER TABLE ONLY public.users ADD CONSTRAINT users_email_key UNIQUE (email) DEFERRABLE;
			alter_table_cmd:{subtype:AT_AddConstraint def:{constraint:{contype:CONSTR_UNIQUE conname:"users_email_key"
			deferrable:true location:196 keys:{string:{sval:"email"}}}} behavior:DROP_RESTRICT}} objtype:OBJECT_TABLE}}

			similar to CREATE table 2nd case where constraint is at the end of column definitions mentioning the constraint only
			so here as well while adding constraint checking the type of constraint and the deferrable field of it.

			ALTER TABLE test ADD CONSTRAINT chk check (id<>'') NOT VALID;
			stmts:{stmt:...subtype:AT_AddConstraint def:{constraint:{contype:CONSTR_CHECK conname:"chk" location:22
			raw_expr:{a_expr:{kind:AEXPR_OP name:{string:{sval:"<>"}} lexpr:{column_ref:{fields:{string:{sval:"id"}} location:43}} rexpr:{a_const:{sval:{}
			location:47}} location:45}} skip_validation:true}} behavior:DROP_RESTRICT}} objtype:OBJECT_TABLE}} stmt_len:60}
		*/
		constraint := cmd.GetDef().GetConstraint()
		alter.ConstraintType = constraint.Contype
		alter.ConstraintName = constraint.Conname
		alter.IsDeferrable = constraint.Deferrable
		alter.ConstraintNotValid = constraint.SkipValidation // this is set for the NOT VALID clause
		alter.ConstraintColumns = parseColumnsFromKeys(constraint.GetKeys())
		if alter.ConstraintType == FOREIGN_CONSTR_TYPE {
			/*
				alter_table_cmd:{subtype:AT_AddConstraint def:{constraint:{contype:CONSTR_FOREIGN conname:"fk" initially_valid:true
				pktable:{schemaname:"schema1" relname:"abc" inh:true relpersistence:"p"
				In case of FKs the reference table is in PKTable field and columns are in FkAttrs
			*/
			alter.ConstraintColumns = parseColumnsFromKeys(constraint.FkAttrs)
			alter.ConstraintReferencedTable = utils.BuildObjectName(constraint.Pktable.Schemaname, constraint.Pktable.Relname)
		}

	case pg_query.AlterTableType_AT_DisableRule:
		/*
			e.g. ALTER TABLE example DISABLE example_rule;
			cmds:{alter_table_cmd:{subtype:AT_DisableRule name:"example_rule" behavior:DROP_RESTRICT}} objtype:OBJECT_TABLE}}
			checking the subType is sufficient in this case
		*/
		alter.RuleName = cmd.Name
		//case CLUSTER ON
		/*
			e.g.
			ALTER TABLE example CLUSTER ON idx;
			stmt:{alter_table_stmt:{relation:{relname:"example" inh:true relpersistence:"p" location:13}
			cmds:{alter_table_cmd:{subtype:AT_ClusterOn name:"idx" behavior:DROP_RESTRICT}} objtype:OBJECT_TABLE}} stmt_len:32

		*/
	}

	return alter, nil
}

// AlterTable is the parsed representation of an ALTER TABLE statement.
// The Constraint* fields are populated only when AlterType is ADD_CONSTRAINT.
type AlterTable struct {
	Query             string
	SchemaName        string
	TableName         string
	AlterType         pg_query.AlterTableType
	RuleName          string
	NumSetAttributes  int
	NumStorageOptions int
	//In case AlterType - ADD_CONSTRAINT
	ConstraintType            pg_query.ConstrType
	ConstraintName            string
	ConstraintNotValid        bool
	ConstraintReferencedTable string
	IsDeferrable              bool
	ConstraintColumns         []string
}

func (a *AlterTable) GetObjectName() string {
	return utils.BuildObjectName(a.SchemaName, a.TableName)
}
func (a *AlterTable) GetSchemaName() string { return a.SchemaName }

func (a *AlterTable) GetObjectType() string { return TABLE_OBJECT_TYPE }

// AddPrimaryKeyOrUniqueCons reports whether this ALTER adds a PRIMARY KEY or
// UNIQUE constraint.
func (a *AlterTable) AddPrimaryKeyOrUniqueCons() bool {
	return a.ConstraintType == PRIMARY_CONSTR_TYPE || a.ConstraintType == UNIQUE_CONSTR_TYPE
}

func (a *AlterTable) IsAddConstraintType() bool {
	return a.AlterType == pg_query.AlterTableType_AT_AddConstraint
}

//===========POLICY PROCESSOR ================================

// PolicyProcessor handles parsing CREATE POLICY statements
type PolicyProcessor struct{}

func NewPolicyProcessor() *PolicyProcessor {
	return &PolicyProcessor{}
}

// Process extracts the policy name, target table and any roles named in a
// CREATE POLICY statement.
func (policyProcessor *PolicyProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) {
	policyNode, ok := getPolicyStmtNode(parseTree)
	if !ok {
		return nil, fmt.Errorf("not a CREATE POLICY statement")
	}

	policy := &Policy{
		PolicyName: policyNode.CreatePolicyStmt.GetPolicyName(),
		SchemaName: policyNode.CreatePolicyStmt.GetTable().GetSchemaname(),
		TableName:  policyNode.CreatePolicyStmt.GetTable().GetRelname(),
		RoleNames:  make([]string, 0),
	}
	roles := policyNode.CreatePolicyStmt.GetRoles()
	/*
		e.g. CREATE POLICY P ON tbl1 TO regress_rls_eve, regress_rls_frank USING (true);
		stmt:{create_policy_stmt:{policy_name:"p" table:{relname:"tbl1" inh:true relpersistence:"p" location:20} cmd_name:"all"
		permissive:true roles:{role_spec:{roletype:ROLESPEC_CSTRING rolename:"regress_rls_eve" location:28}} roles:{role_spec:
		{roletype:ROLESPEC_CSTRING rolename:"regress_rls_frank" location:45}} qual:{a_const:{boolval:{boolval:true} location:70}}}}
		stmt_len:75

		here role_spec of each roles is managing the roles related information in a POLICY DDL if any, so we can just check if there is
		a role name available in it which means there is a role associated with this DDL. Hence report it.

	*/
	for _, role := range roles {
		roleName := role.GetRoleSpec().GetRolename() // only in case there is role associated with a policy it will error out in schema migration
		if roleName != "" {
			//this means there is some role or grants used in this Policy, so detecting it
			policy.RoleNames = append(policy.RoleNames, roleName)
		}
	}
	return policy, nil
}

// Policy is the parsed representation of a CREATE POLICY statement.
type Policy struct {
	SchemaName string
	TableName  string
	PolicyName string
	RoleNames  []string
}

func (p *Policy) GetObjectName() string {
	qualifiedTable := utils.BuildObjectName(p.SchemaName, p.TableName)
	return fmt.Sprintf("%s ON %s", p.PolicyName, qualifiedTable)
}
func (p *Policy) GetSchemaName() string { return p.SchemaName }

func (p *Policy) GetObjectType() string { return POLICY_OBJECT_TYPE }

//=====================TRIGGER PROCESSOR ==================

// TriggerProcessor handles parsing CREATE Trigger statements
type TriggerProcessor struct{}

func NewTriggerProcessor() *TriggerProcessor {
	return &TriggerProcessor{}
}

/*
e.g.CREATE CONSTRAINT TRIGGER some_trig

	AFTER DELETE ON xyz_schema.abc
	DEFERRABLE INITIALLY DEFERRED
	FOR EACH ROW EXECUTE PROCEDURE xyz_schema.some_trig();

create_trig_stmt:{isconstraint:true trigname:"some_trig" relation:{schemaname:"xyz_schema"
relname:"abc" inh:true relpersistence:"p"
location:56} funcname:{string:{sval:"xyz_schema"}} funcname:{string:{sval:"some_trig"}} row:true events:8 deferrable:true initdeferred:true}}
stmt_len:160}

e.g. CREATE TRIGGER projects_loose_fk_trigger

	AFTER DELETE ON public.projects
	REFERENCING OLD TABLE AS old_table
	FOR EACH STATEMENT EXECUTE FUNCTION xyz_schema.some_trig();

stmt:{create_trig_stmt:{trigname:"projects_loose_fk_trigger" relation:{schemaname:"public" relname:"projects" inh:true
relpersistence:"p" location:58} funcname:{string:{sval:"xyz_schema"}} funcname:{string:{sval:"some_trig"}} events:8
transition_rels:{trigger_transition:{name:"old_table" is_table:true}}}} stmt_len:167}
*/
// Process extracts the trigger name, target table, timing/event bit flags and
// the unqualified trigger function name from a CREATE TRIGGER statement.
func (triggerProcessor *TriggerProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) {
	triggerNode, ok := getCreateTriggerStmtNode(parseTree)
	if !ok {
		return nil, fmt.Errorf("not a CREATE TRIGGER statement")
	}

	trigger := &Trigger{
		SchemaName:  triggerNode.CreateTrigStmt.Relation.Schemaname,
		TableName:   triggerNode.CreateTrigStmt.Relation.Relname,
		TriggerName: triggerNode.CreateTrigStmt.Trigname,

		IsConstraint:           triggerNode.CreateTrigStmt.Isconstraint,
		NumTransitionRelations: len(triggerNode.CreateTrigStmt.GetTransitionRels()),
		Timing:                 triggerNode.CreateTrigStmt.Timing,
		Events:                 triggerNode.CreateTrigStmt.Events,
		ForEachRow:             triggerNode.CreateTrigStmt.Row,
	}
	// Only the object name is kept; the function's schema is discarded here.
	_, trigger.FuncName = getSchemaAndObjectName(triggerNode.CreateTrigStmt.Funcname)

	return trigger, nil
}

// Trigger is the parsed representation of a CREATE TRIGGER statement.
// Timing and Events are bit masks; see the comment on IsBeforeRowTrigger.
type Trigger struct {
	SchemaName             string
	TableName              string
	TriggerName            string
	IsConstraint           bool
	NumTransitionRelations int
	ForEachRow             bool
	Timing                 int32
	Events                 int32
	FuncName               string //Unqualified function name
}

func (t *Trigger) GetObjectName() string {
	return fmt.Sprintf("%s ON %s", t.TriggerName, t.GetTableName())
}

func (t *Trigger) GetTableName() string {
	return utils.BuildObjectName(t.SchemaName, t.TableName)
}

func (t *Trigger) GetSchemaName() string { return t.SchemaName }

func (t *Trigger) GetObjectType() string { return TRIGGER_OBJECT_TYPE }

/*
e.g.CREATE TRIGGER after_insert_or_delete_trigger

	BEFORE INSERT OR DELETE ON main_table
	FOR EACH ROW
	EXECUTE FUNCTION handle_insert_or_delete();

stmt:{create_trig_stmt:{trigname:"after_insert_or_delete_trigger" relation:{relname:"main_table" inh:true relpersistence:"p"
location:111} funcname:{string:{sval:"handle_insert_or_delete"}} row:true timing:2 events:12}} stmt_len:177}

here,
timing - bits of BEFORE/AFTER/INSTEAD
events - bits of "OR" INSERT/UPDATE/DELETE/TRUNCATE
row - FOR EACH ROW (true), FOR EACH STATEMENT (false)
refer - https://github.com/pganalyze/pg_query_go/blob/c3a818d346a927c18469460bb18acb397f4f4301/parser/include/postgres/catalog/pg_trigger_d.h#L49

	TRIGGER_TYPE_BEFORE (1 << 1)
	TRIGGER_TYPE_INSERT (1 << 2)
	TRIGGER_TYPE_DELETE (1 << 3)
	TRIGGER_TYPE_UPDATE (1 << 4)
	TRIGGER_TYPE_TRUNCATE (1 << 5)
	TRIGGER_TYPE_INSTEAD (1 << 6)
*/
// IsBeforeRowTrigger reports whether this is a BEFORE ... FOR EACH ROW trigger
// (TRIGGER_TYPE_BEFORE bit set in Timing, and Row true).
func (t *Trigger) IsBeforeRowTrigger() bool {
	isSecondBitSet := t.Timing&(1<<1) != 0
	return t.ForEachRow && isSecondBitSet
}

// ========================TYPE PROCESSOR======================

type TypeProcessor struct{}

func NewTypeProcessor() *TypeProcessor {
	return &TypeProcessor{}
}

// Process handles both composite (CREATE TYPE ... AS (...)) and enum
// (CREATE TYPE ... AS ENUM (...)) statements.
func (typeProcessor *TypeProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) {
	compositeNode, isComposite := getCompositeTypeStmtNode(parseTree)
	enumNode, isEnum := getEnumTypeStmtNode(parseTree)

	switch {
	case isComposite:
		createType := &CreateType{
			TypeName:   compositeNode.CompositeTypeStmt.Typevar.GetRelname(),
			SchemaName: compositeNode.CompositeTypeStmt.Typevar.GetSchemaname(),
		}
		return createType, nil
	case isEnum:
		typeNames := enumNode.CreateEnumStmt.GetTypeName()
		typeSchemaName, typeName := getSchemaAndObjectName(typeNames)
		createType := &CreateType{
			TypeName:   typeName,
			SchemaName: typeSchemaName,
IsEnum: true, + } + return createType, nil + + default: + return nil, fmt.Errorf("not CREATE TYPE statement") + } + +} + +type CreateType struct { + TypeName string + SchemaName string + IsEnum bool +} + +func (c *CreateType) GetObjectName() string { + return utils.BuildObjectName(c.SchemaName, c.TypeName) +} +func (c *CreateType) GetSchemaName() string { return c.SchemaName } + +func (c *CreateType) GetObjectType() string { return TYPE_OBJECT_TYPE } + +//===========================VIEW PROCESSOR=================== + +type ViewProcessor struct{} + +func NewViewProcessor() *ViewProcessor { + return &ViewProcessor{} +} + +func (v *ViewProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) { + viewNode, ok := getCreateViewNode(parseTree) + if !ok { + return nil, fmt.Errorf("not a CREATE VIEW statement") + } + + viewSchemaName := viewNode.ViewStmt.View.Schemaname + viewName := viewNode.ViewStmt.View.Relname + qualifiedViewName := utils.BuildObjectName(viewSchemaName, viewName) + + /* + view_stmt:{view:{schemaname:"public" relname:"invoker_view" inh:true relpersistence:"p" location:12} + query:{select_stmt:{target_list:{res_target:{val:{column_ref:{fields:{string:{sval:"id"}} location:95}} location:95}} + from_clause:{...} + where_clause:{...} + options:{def_elem:{defname:"security_invoker" arg:{string:{sval:"true"}} defaction:DEFELEM_UNSPEC location:32}} + options:{def_elem:{defname:"security_barrier" arg:{string:{sval:"false"}} defaction:DEFELEM_UNSPEC location:57}} + with_check_option:NO_CHECK_OPTION} + */ + log.Infof("checking the view '%s' is security invoker view", qualifiedViewName) + msg := GetProtoMessageFromParseTree(parseTree) + defNames, err := TraverseAndExtractDefNamesFromDefElem(msg) + if err != nil { + return nil, err + } + + view := View{ + SchemaName: viewSchemaName, + ViewName: viewName, + SecurityInvoker: slices.Contains(defNames, "security_invoker"), + } + return &view, nil +} + +type View struct { + SchemaName string + ViewName 
string + SecurityInvoker bool +} + +func (v *View) GetObjectName() string { + return utils.BuildObjectName(v.SchemaName, v.ViewName) +} +func (v *View) GetSchemaName() string { return v.SchemaName } + +func (v *View) GetObjectType() string { return VIEW_OBJECT_TYPE } + +//===========================MVIEW PROCESSOR=================== + +type MViewProcessor struct{} + +func NewMViewProcessor() *MViewProcessor { + return &MViewProcessor{} +} + +func (mv *MViewProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) { + mviewNode, ok := getCreateTableAsStmtNode(parseTree) + if !ok { + return nil, fmt.Errorf("not a CREATE VIEW statement") + } + mview := MView{ + SchemaName: mviewNode.CreateTableAsStmt.Into.Rel.Schemaname, + ViewName: mviewNode.CreateTableAsStmt.Into.Rel.Relname, + } + return &mview, nil +} + +type MView struct { + SchemaName string + ViewName string +} + +func (mv *MView) GetObjectName() string { + return utils.BuildObjectName(mv.SchemaName, mv.ViewName) +} +func (mv *MView) GetSchemaName() string { return mv.SchemaName } + +func (mv *MView) GetObjectType() string { return MVIEW_OBJECT_TYPE } + +//=============================COLLATION PROCESSOR ============== + +type CollationProcessor struct{} + +func NewCollationProcessor() *CollationProcessor { + return &CollationProcessor{} +} + +func (cp *CollationProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) { + defineStmt, ok := getDefineStmtNode(parseTree) + if !ok { + return nil, fmt.Errorf("not a CREATE COLLATION statement") + } + schema, colName := getSchemaAndObjectName(defineStmt.Defnames) + defNames, err := TraverseAndExtractDefNamesFromDefElem(defineStmt.ProtoReflect()) + if err != nil { + return nil, fmt.Errorf("error getting the defElems in collation: %v", err) + } + collation := Collation{ + SchemaName: schema, + CollationName: colName, + Options: defNames, + } + return &collation, nil +} + +type Collation struct { + SchemaName string + CollationName string + 
Options []string +} + +func (c *Collation) GetObjectName() string { + return lo.Ternary(c.SchemaName != "", fmt.Sprintf("%s.%s", c.SchemaName, c.CollationName), c.CollationName) +} +func (c *Collation) GetSchemaName() string { return c.SchemaName } + +func (c *Collation) GetObjectType() string { return COLLATION_OBJECT_TYPE } + +// ============================Function Processor ================= + +type FunctionProcessor struct{} + +func NewFunctionProcessor() *FunctionProcessor { + return &FunctionProcessor{} +} + +func (mv *FunctionProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) { + funcNode, ok := getCreateFuncStmtNode(parseTree) + if !ok { + return nil, fmt.Errorf("not a CREATE FUNCTION statement") + } + + funcNameList := funcNode.CreateFunctionStmt.GetFuncname() + funcSchemaName, funcName := getSchemaAndObjectName(funcNameList) + function := Function{ + SchemaName: funcSchemaName, + FuncName: funcName, + ReturnType: GetReturnTypeOfFunc(parseTree), + } + return &function, nil +} + +type Function struct { + SchemaName string + FuncName string + ReturnType string +} + +func (f *Function) GetObjectName() string { + return lo.Ternary(f.SchemaName != "", fmt.Sprintf("%s.%s", f.SchemaName, f.FuncName), f.FuncName) +} +func (f *Function) GetSchemaName() string { return f.SchemaName } + +func (f *Function) GetObjectType() string { return FUNCTION_OBJECT_TYPE } + +//=============================No-Op PROCESSOR ================== + +//No op Processor for objects we don't have Processor yet + +type NoOpProcessor struct{} + +func NewNoOpProcessor() *NoOpProcessor { + return &NoOpProcessor{} +} + +type Object struct { + ObjectName string + SchemaName string +} + +func (o *Object) GetObjectName() string { return o.ObjectName } +func (o *Object) GetSchemaName() string { return o.SchemaName } +func (o *Object) GetObjectType() string { return "OBJECT" } + +func (n *NoOpProcessor) Process(parseTree *pg_query.ParseResult) (DDLObject, error) { + return 
&Object{}, nil +} + +func GetDDLProcessor(parseTree *pg_query.ParseResult) (DDLProcessor, error) { + stmtType := GetStatementType(parseTree.Stmts[0].Stmt.ProtoReflect()) + switch stmtType { + case PG_QUERY_CREATE_STMT: + return NewTableProcessor(), nil + case PG_QUERY_INDEX_STMT: + return NewIndexProcessor(), nil + case PG_QUERY_ALTER_TABLE_STMT: + return NewAlterTableProcessor(), nil + case PG_QUERY_POLICY_STMT: + return NewPolicyProcessor(), nil + case PG_QUERY_CREATE_TRIG_STMT: + return NewTriggerProcessor(), nil + case PG_QUERY_COMPOSITE_TYPE_STMT, PG_QUERY_ENUM_TYPE_STMT: + return NewTypeProcessor(), nil + case PG_QUERY_FOREIGN_TABLE_STMT: + return NewForeignTableProcessor(), nil + case PG_QUERY_VIEW_STMT: + return NewViewProcessor(), nil + case PG_QUERY_CREATE_TABLE_AS_STMT: + if IsMviewObject(parseTree) { + return NewMViewProcessor(), nil + } + return NewNoOpProcessor(), nil + case PG_QUERY_DEFINE_STMT_NODE: + if IsCollationObject(parseTree) { + return NewCollationProcessor(), nil + } + return NewNoOpProcessor(), nil + case PG_QUERY_CREATE_FUNCTION_STMT: + return NewFunctionProcessor(), nil + default: + return NewNoOpProcessor(), nil + } +} + +const ( + TABLE_OBJECT_TYPE = "TABLE" + TYPE_OBJECT_TYPE = "TYPE" + VIEW_OBJECT_TYPE = "VIEW" + MVIEW_OBJECT_TYPE = "MVIEW" + FOREIGN_TABLE_OBJECT_TYPE = "FOREIGN TABLE" + FUNCTION_OBJECT_TYPE = "FUNCTION" + PROCEDURE_OBJECT_TYPE = "PROCEDURE" + INDEX_OBJECT_TYPE = "INDEX" + POLICY_OBJECT_TYPE = "POLICY" + TRIGGER_OBJECT_TYPE = "TRIGGER" + COLLATION_OBJECT_TYPE = "COLLATION" + ADD_CONSTRAINT = pg_query.AlterTableType_AT_AddConstraint + SET_OPTIONS = pg_query.AlterTableType_AT_SetOptions + DISABLE_RULE = pg_query.AlterTableType_AT_DisableRule + CLUSTER_ON = pg_query.AlterTableType_AT_ClusterOn + EXCLUSION_CONSTR_TYPE = pg_query.ConstrType_CONSTR_EXCLUSION + FOREIGN_CONSTR_TYPE = pg_query.ConstrType_CONSTR_FOREIGN + DEFAULT_SORTING_ORDER = pg_query.SortByDir_SORTBY_DEFAULT + PRIMARY_CONSTR_TYPE = 
pg_query.ConstrType_CONSTR_PRIMARY + UNIQUE_CONSTR_TYPE = pg_query.ConstrType_CONSTR_UNIQUE + LIST_PARTITION = pg_query.PartitionStrategy_PARTITION_STRATEGY_LIST + PG_QUERY_CREATE_STMT = "pg_query.CreateStmt" + PG_QUERY_INDEX_STMT = "pg_query.IndexStmt" + PG_QUERY_ALTER_TABLE_STMT = "pg_query.AlterTableStmt" + PG_QUERY_POLICY_STMT = "pg_query.CreatePolicyStmt" + PG_QUERY_CREATE_TRIG_STMT = "pg_query.CreateTrigStmt" + PG_QUERY_COMPOSITE_TYPE_STMT = "pg_query.CompositeTypeStmt" + PG_QUERY_ENUM_TYPE_STMT = "pg_query.CreateEnumStmt" + PG_QUERY_FOREIGN_TABLE_STMT = "pg_query.CreateForeignTableStmt" + PG_QUERY_VIEW_STMT = "pg_query.ViewStmt" + PG_QUERY_CREATE_TABLE_AS_STMT = "pg_query.CreateTableAsStmt" + PG_QUERY_CREATE_FUNCTION_STMT = "pg_query.CreateFunctionStmt" +) + +var deferrableConstraintsList = []pg_query.ConstrType{ + pg_query.ConstrType_CONSTR_ATTR_DEFERRABLE, + pg_query.ConstrType_CONSTR_ATTR_DEFERRED, + pg_query.ConstrType_CONSTR_ATTR_IMMEDIATE, +} diff --git a/yb-voyager/src/query/queryparser/helpers_protomsg.go b/yb-voyager/src/query/queryparser/helpers_protomsg.go new file mode 100644 index 0000000000..174270d72b --- /dev/null +++ b/yb-voyager/src/query/queryparser/helpers_protomsg.go @@ -0,0 +1,481 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
package queryparser

import (
	"fmt"
	"strings"

	pg_query "github.com/pganalyze/pg_query_go/v6"
	log "github.com/sirupsen/logrus"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
)

const (
	DOCS_LINK_PREFIX        = "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/"
	POSTGRESQL_PREFIX       = "postgresql/"
	ADVISORY_LOCKS_DOC_LINK = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#advisory-locks-is-not-yet-implemented"
	SYSTEM_COLUMNS_DOC_LINK = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#system-columns-is-not-yet-supported"
	XML_FUNCTIONS_DOC_LINK  = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#xml-functions-is-not-yet-supported"
)

// GetProtoMessageFromParseTree returns the protobuf reflection message of the
// first statement in the parse tree.
func GetProtoMessageFromParseTree(parseTree *pg_query.ParseResult) protoreflect.Message {
	return parseTree.Stmts[0].Stmt.ProtoReflect()

}

// GetMsgFullName returns the fully-qualified protobuf name of the message,
// e.g. "pg_query.FuncCall".
func GetMsgFullName(msg protoreflect.Message) string {
	return string(msg.Descriptor().FullName())
}

// GetFuncNameFromFuncCall returns (schema, function) extracted from a FuncCall
// node; schema is empty when the call is unqualified.
// Sample example: {func_call:{funcname:{string:{sval:"pg_advisory_lock"}}
// NOTE(review): when funcname has more than two parts only the first two are
// returned — confirm this is intended for names like db.schema.func.
func GetFuncNameFromFuncCall(funcCallNode protoreflect.Message) (string, string) {
	if GetMsgFullName(funcCallNode) != PG_QUERY_FUNCCALL_NODE {
		return "", ""
	}

	funcnameList := GetListField(funcCallNode, "funcname")
	var names []string
	for i := 0; i < funcnameList.Len(); i++ {
		item := funcnameList.Get(i)
		name := GetStringValueFromNode(item.Message())
		if name != "" {
			names = append(names, name)
		}
	}

	if len(names) == 0 {
		return "", ""
	} else if len(names) == 1 {
		return "", names[0]
	}
	return names[0], names[1]
}

// GetColNameFromColumnRef returns (qualifier, column) extracted from a
// ColumnRef node; qualifier is empty for an unqualified column reference.
// Sample example: {column_ref:{fields:{string:{sval:"xmax"}}
func GetColNameFromColumnRef(columnRefNode protoreflect.Message) (string, string) {
	if GetMsgFullName(columnRefNode) != PG_QUERY_COLUMNREF_NODE {
		return "", ""
	}

	fieldsList := GetListField(columnRefNode, "fields")
	var names []string
	for i := 0; i < fieldsList.Len(); i++ {
		item := fieldsList.Get(i)
		name := GetStringValueFromNode(item.Message())
		if name != "" {
			names = append(names, name)
		}
	}

	if len(names) == 0 {
		return "", ""
	} else if len(names) == 1 {
		return "", names[0]
	}
	return names[0], names[1]
}

// GetStringValueFromNode extracts the string carried by a generic Node wrapper
// (String, A_Const or TypeCast); other node kinds yield "".
// Sample example: {column_ref:{fields:{string:{sval:"s"}} fields:{string:{sval:"tableoid"}} location:7}
func GetStringValueFromNode(nodeMsg protoreflect.Message) string {
	if nodeMsg == nil || !nodeMsg.IsValid() {
		return ""
	}

	nodeField := getOneofActiveField(nodeMsg, "node")
	if nodeField == nil {
		return ""
	}

	// Get the message corresponding to the set field
	nodeValue := nodeMsg.Get(nodeField)
	node := nodeValue.Message()
	if node == nil || !node.IsValid() {
		return ""
	}

	nodeType := node.Descriptor().FullName()
	switch nodeType {
	// Represents a simple string literal in a query, such as names or values directly provided in the SQL text.
	case PG_QUERY_STRING_NODE:
		return GetStringField(node, "sval")
	// Represents a constant value in SQL expressions, often used for literal values like strings, numbers, or keywords.
	case PG_QUERY_ACONST_NODE:
		return getStringFromAConstMsg(node)
	// Represents a type casting operation in SQL, where a value is explicitly converted from one type to another using a cast expression.
	case PG_QUERY_TYPECAST_NODE:
		return getStringFromTypeCastMsg(node, "arg")
	// Represents the asterisk '*' used in SQL to denote the selection of all columns in a table. Example: SELECT * FROM employees;
	case PG_QUERY_ASTAR_NODE:
		return ""
	default:
		return ""
	}
}

// getStringFromAConstMsg extracts the string from an 'A_Const' node's 'sval' field
// Sample example: rowexpr:{a_const:{sval:{sval:"//Product"} location:124}}
func getStringFromAConstMsg(aConstMsg protoreflect.Message) string {
	svalMsg := GetMessageField(aConstMsg, "sval")
	if svalMsg == nil {
		return ""
	}

	return GetStringField(svalMsg, "sval")
}

// getStringFromTypeCastMsg traverses to a specified field and extracts the 'A_Const' string value
// Sample example: rowexpr:{type_cast:{arg:{a_const:{sval:{sval:"/order/item"}
func getStringFromTypeCastMsg(nodeMsg protoreflect.Message, fieldName string) string {
	childMsg := GetMessageField(nodeMsg, fieldName)
	if childMsg == nil {
		return ""
	}

	return GetStringValueFromNode(childMsg)
}

/*
Note: XMLTABLE() is not a simple function(stored in FuncCall node), its a table function
which generates set of rows using the info(about rows, columns, content) provided to it
Hence its requires a more complex node structure(RangeTableFunc node) to represent.

XMLTABLE transforms XML data into relational table format, making it easier to query XML structures.
Detection in RangeTableFunc Node:
- docexpr: Refers to the XML data source, usually a column storing XML.
- rowexpr: XPath expression (starting with '/' or '//') defining the rows in the XML.
- columns: Specifies the data extraction from XML into relational columns.

Example: Converting XML data about books into a table:
SQL Query:

	SELECT x.*
	FROM XMLTABLE(
	    '/bookstore/book'
	    PASSING xml_column
	    COLUMNS
	        title TEXT PATH 'title',
	        author TEXT PATH 'author'
	) AS x;

Parsetree: stmt:{select_stmt:{target_list:{res_target:{val:{column_ref:{fields:{string:{sval:"x"}} fields:{a_star:{}} location:7}} location:7}}
from_clause:{range_table_func:
	{docexpr:{column_ref:{fields:{string:{sval:"xml_column"}} location:57}}
	rowexpr:{a_const:{sval:{sval:"/bookstore/book"} location:29}}
	columns:{range_table_func_col:{colname:"title" type_name:{names:{string:{sval:"text"}} typemod:-1 location:87} colexpr:{a_const:{sval:{sval:"title"} location:97}} location:81}}
	columns:{range_table_func_col:{colname:"author" type_name:{names:{string:{sval:"text"}} typemod:-1 location:116} colexpr:{a_const:{sval:{sval:"author"} location:126}} location:109}}
alias:{aliasname:"x"} location:17}} limit_option:LIMIT_OPTION_DEFAULT op:SETOP_NONE}} stmt_len:142

Here, 'docexpr' points to 'xml_column' containing XML data, 'rowexpr' selects each 'book' node, and 'columns' extract 'title' and 'author' from each book.
Hence Presence of XPath in 'rowexpr' and structured 'columns' typically indicates XMLTABLE usage.
+*/ +// Function to detect if a RangeTableFunc node represents XMLTABLE() +func IsXMLTable(rangeTableFunc protoreflect.Message) bool { + if GetMsgFullName(rangeTableFunc) != PG_QUERY_RANGETABLEFUNC_NODE { + return false + } + + log.Debug("checking if range table func node is for XMLTABLE()") + // Check for 'docexpr' field + docexprField := rangeTableFunc.Descriptor().Fields().ByName("docexpr") + if docexprField == nil { + return false + } + docexprNode := rangeTableFunc.Get(docexprField).Message() + if docexprNode == nil { + return false + } + + // Check for 'rowexpr' field + rowexprField := rangeTableFunc.Descriptor().Fields().ByName("rowexpr") + if rowexprField == nil { + return false + } + rowexprNode := rangeTableFunc.Get(rowexprField).Message() + if rowexprNode == nil { + return false + } + + xpath := GetStringValueFromNode(rowexprNode) + log.Debugf("xpath expression in the node: %s\n", xpath) + // Keep both cases check(param placeholder and absolute check) + if xpath == "" { + // Attempt to check if 'rowexpr' is a parameter placeholder like '$1' + isPlaceholder := IsParameterPlaceholder(rowexprNode) + if !isPlaceholder { + return false + } + } else if !IsXPathExprForXmlTable(xpath) { + return false + } + + // Check for 'columns' field + columnsField := rangeTableFunc.Descriptor().Fields().ByName("columns") + if columnsField == nil { + return false + } + + columnsList := rangeTableFunc.Get(columnsField).List() + if columnsList.Len() == 0 { + return false + } + + // this means all the required fields of RangeTableFunc node for being a XMLTABLE() are present + return true +} + +/* +isXPathExprForXmlTable checks whether a given string is a valid XPath expression for XMLTABLE()'s rowexpr. +It returns true if the expression starts with '/' or '//', indicating an absolute or anywhere path. +This covers the primary cases used in XMLTABLE() for selecting XML nodes as rows. + +XPath Expression Cases Covered for XMLTABLE(): +1. 
Absolute Paths: +- Starts with a single '/' indicating the root node. +- Example: "/library/book" + +2. Anywhere Paths: +- Starts with double '//' indicating selection from anywhere in the document. +- Example: "//book/author" + +For a comprehensive overview of XPath expressions, refer to: +https://developer.mozilla.org/en-US/docs/Web/XPath +*/ +func IsXPathExprForXmlTable(expression string) bool { + // Trim leading and trailing whitespace + expression = strings.TrimSpace(expression) + if expression == "" { + return false + } + + // Check if the expression starts with '/' or '//' + return strings.HasPrefix(expression, "/") || strings.HasPrefix(expression, "//") +} + +// IsParameterPlaceholder checks if the given node represents a parameter placeholder like $1 +func IsParameterPlaceholder(nodeMsg protoreflect.Message) bool { + if nodeMsg == nil || !nodeMsg.IsValid() { + return false + } + + nodeField := getOneofActiveField(nodeMsg, "node") + if nodeField == nil { + return false + } + + // Get the message corresponding to the set field + nodeValue := nodeMsg.Get(nodeField) + node := nodeValue.Message() + if node == nil || !node.IsValid() { + return false + } + + // Identify the type of the node + nodeType := node.Descriptor().FullName() + if nodeType == PG_QUERY_PARAMREF_NODE { + // This node represents a parameter reference like $1 + return true + } + + return false +} + +// getOneofActiveField retrieves the active field from a specified oneof in a Protobuf message. +// It returns the FieldDescriptor of the active field if a field is set, or nil if no field is set or the oneof is not found. 
+func getOneofActiveField(msg protoreflect.Message, oneofName string) protoreflect.FieldDescriptor { + if msg == nil { + return nil + } + + // Get the descriptor of the message and find the oneof by name + descriptor := msg.Descriptor() + if descriptor == nil { + return nil + } + + oneofDescriptor := descriptor.Oneofs().ByName(protoreflect.Name(oneofName)) + if oneofDescriptor == nil { + return nil + } + + // Determine which field within the oneof is set + return msg.WhichOneof(oneofDescriptor) +} + +func GetStatementType(msg protoreflect.Message) string { + nodeMsg := getOneofActiveField(msg, "node") + if nodeMsg == nil { + return "" + } + + // Get the message corresponding to the set field + nodeValue := msg.Get(nodeMsg) + node := nodeValue.Message() + if node == nil || !node.IsValid() { + return "" + } + return GetMsgFullName(node) +} + +func getOneofActiveNode(msg protoreflect.Message) protoreflect.Message { + nodeField := getOneofActiveField(msg, "node") + if nodeField == nil { + return nil + } + + value := msg.Get(nodeField) + node := value.Message() + if node == nil || !node.IsValid() { + return nil + } + + return node +} + +// == Generic helper functions == + +// GetStringField retrieves a string field from a message. +// Sample example:: {column_ref:{fields:{string:{sval:"s"}} fields:{string:{sval:"tableoid"}} location:7} +func GetStringField(msg protoreflect.Message, fieldName string) string { + field := msg.Descriptor().Fields().ByName(protoreflect.Name(fieldName)) + if field != nil && msg.Has(field) { + return msg.Get(field).String() + } + return "" +} + +// GetMessageField retrieves a message field from a message. 
+func GetMessageField(msg protoreflect.Message, fieldName string) protoreflect.Message { + field := msg.Descriptor().Fields().ByName(protoreflect.Name(fieldName)) + if field != nil && msg.Has(field) { + return msg.Get(field).Message() + } + return nil +} + +func GetBoolField(msg protoreflect.Message, fieldName string) bool { + field := msg.Descriptor().Fields().ByName(protoreflect.Name(fieldName)) + if field != nil && msg.Has(field) { + return msg.Get(field).Bool() + } + return false +} + +// GetListField retrieves a list field from a message. +func GetListField(msg protoreflect.Message, fieldName string) protoreflect.List { + field := msg.Descriptor().Fields().ByName(protoreflect.Name(fieldName)) + if field != nil && msg.Has(field) { + return msg.Get(field).List() + } + return nil +} + +// GetEnumNumField retrieves a enum field from a message +// FieldDescriptor{Syntax: proto3, FullName: pg_query.JsonFuncExpr.op, Number: 1, Cardinality: optional, Kind: enum, HasJSONName: true, JSONName: "op", Enum: pg_query.JsonExprOp} +// val:{json_func_expr:{op:JSON_QUERY_OP context_item:{raw_expr:{column_ref:{fields:{string:{sval:"details"}} location:2626}} format:{format_type:JS_FORMAT_DEFAULT encoding:JS_ENC_DEFAULT +func GetEnumNumField(msg protoreflect.Message, fieldName string) protoreflect.EnumNumber { + field := msg.Descriptor().Fields().ByName(protoreflect.Name(fieldName)) + if field != nil && msg.Has(field) { + return msg.Get(field).Enum() + } + return 0 +} + +// GetSchemaAndObjectName extracts the schema and object name from a list. 
+func GetSchemaAndObjectName(nameList protoreflect.List) (string, string) { + var schemaName, objectName string + + if nameList.Len() == 1 { + objectName = GetStringField(nameList.Get(0).Message(), "string") + } else if nameList.Len() == 2 { + schemaName = GetStringField(nameList.Get(0).Message(), "string") + objectName = GetStringField(nameList.Get(1).Message(), "string") + } + return schemaName, objectName +} + +func ProtoAsSelectStmt(msg protoreflect.Message) (*pg_query.SelectStmt, error) { + protoMsg, ok := msg.Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to cast msg to proto.Message") + } + selectStmtNode, ok := protoMsg.(*pg_query.SelectStmt) + if !ok { + return nil, fmt.Errorf("failed to cast msg to %s", PG_QUERY_SELECTSTMT_NODE) + } + return selectStmtNode, nil +} + +func ProtoAsIndexStmt(msg protoreflect.Message) (*pg_query.IndexStmt, error) { + protoMsg, ok := msg.Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to cast msg to proto.Message") + } + indexStmtNode, ok := protoMsg.(*pg_query.IndexStmt) + if !ok { + return nil, fmt.Errorf("failed to cast msg to %s", PG_QUERY_INDEX_STMT_NODE) + } + return indexStmtNode, nil +} + +func ProtoAsTableConstraint(msg protoreflect.Message) (*pg_query.Constraint, error) { + proto, ok := msg.Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to cast msg to proto.Message") + } + constraintNode, ok := proto.(*pg_query.Constraint) + if !ok { + return nil, fmt.Errorf("failed to cast msg to %s", PG_QUERY_CONSTRAINT_NODE) + } + return constraintNode, nil +} + +/* +Example: +options:{def_elem:{defname:"security_invoker" arg:{string:{sval:"true"}} defaction:DEFELEM_UNSPEC location:32}} +options:{def_elem:{defname:"security_barrier" arg:{string:{sval:"false"}} defaction:DEFELEM_UNSPEC location:57}} +Extract all defnames from the def_eleme node +*/ +func TraverseAndExtractDefNamesFromDefElem(msg protoreflect.Message) ([]string, error) { + var defNames 
[]string + collectorFunc := func(msg protoreflect.Message) error { + if GetMsgFullName(msg) != PG_QUERY_DEFELEM_NODE { + return nil + } + + defName := GetStringField(msg, "defname") + // TODO(future): + // defValNode = GetMessageField(msg, "arg") + // defVal = GetStringField(defValNode, "sval") + + defNames = append(defNames, defName) + return nil + } + visited := make(map[protoreflect.Message]bool) + err := TraverseParseTree(msg, visited, collectorFunc) + if err != nil { + return nil, fmt.Errorf("failed to traverse parse tree for fetching defnames: %w", err) + } + + return defNames, nil +} + +func GetAIndirectionNode(msg protoreflect.Message) (*pg_query.A_Indirection, bool) { + protoMsg := msg.Interface().(protoreflect.ProtoMessage) + aIndirection, ok := protoMsg.(*pg_query.A_Indirection) + return aIndirection, ok +} diff --git a/yb-voyager/src/query/queryparser/helpers_struct.go b/yb-voyager/src/query/queryparser/helpers_struct.go new file mode 100644 index 0000000000..46d02b5edb --- /dev/null +++ b/yb-voyager/src/query/queryparser/helpers_struct.go @@ -0,0 +1,347 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package queryparser + +import ( + "fmt" + "slices" + "strings" + + pg_query "github.com/pganalyze/pg_query_go/v6" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" +) + +const ( + LIMIT_OPTION_WITH_TIES = pg_query.LimitOption_LIMIT_OPTION_WITH_TIES +) + +func IsPLPGSQLObject(parseTree *pg_query.ParseResult) bool { + // CREATE FUNCTION is same parser NODE for FUNCTION/PROCEDURE + _, isPlPgSQLObject := getCreateFuncStmtNode(parseTree) + return isPlPgSQLObject +} + +func IsViewObject(parseTree *pg_query.ParseResult) bool { + _, isViewStmt := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_ViewStmt) + return isViewStmt +} + +func IsMviewObject(parseTree *pg_query.ParseResult) bool { + createAsNode, isCreateAsStmt := getCreateTableAsStmtNode(parseTree) //for MVIEW case + return isCreateAsStmt && createAsNode.CreateTableAsStmt.Objtype == pg_query.ObjectType_OBJECT_MATVIEW +} + +func getDefineStmtNode(parseTree *pg_query.ParseResult) (*pg_query.DefineStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_DefineStmt) + return node.DefineStmt, ok +} + +func IsCollationObject(parseTree *pg_query.ParseResult) bool { + collation, ok := getDefineStmtNode(parseTree) + /* + stmts:{stmt:{define_stmt:{kind:OBJECT_COLLATION defnames:{string:{sval:"ignore_accents"}} definition:{def_elem:{defname:"provider" + arg:{type_name:{names:{string:{sval:"icu"}} typemod:-1 location:48}} defaction:DEFELEM_UNSPEC location:37}} definition:{def_elem:{defname:"locale" + arg:{string:{sval:"und-u-ks-level1-kc-true"}} defaction:DEFELEM_UNSPEC location:55}} definition:{def_elem:{defname:"deterministic" + arg:{string:{sval:"false"}} defaction:DEFELEM_UNSPEC location:91}}}} stmt_len:113} + */ + return ok && collation.Kind == pg_query.ObjectType_OBJECT_COLLATION +} + +func GetObjectTypeAndObjectName(parseTree *pg_query.ParseResult) (string, string) { + createFuncNode, isCreateFunc := getCreateFuncStmtNode(parseTree) + viewNode, isViewStmt := getCreateViewNode(parseTree) + 
createAsNode, _ := getCreateTableAsStmtNode(parseTree) + createTableNode, isCreateTable := getCreateTableStmtNode(parseTree) + createIndexNode, isCreateIndex := getCreateIndexStmtNode(parseTree) + alterTableNode, isAlterTable := getAlterStmtNode(parseTree) + switch true { + case isCreateFunc: + /* + version:160001 stmts:{stmt:{create_function_stmt:{replace:true funcname:{string:{sval:"public"}} funcname:{string:{sval:"add_employee"}} + parameters:{function_parameter:{name:"emp_name" arg_type:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"varchar"}} + typemod:-1 location:62} mode:FUNC_PARAM_DEFAULT}} parameters:{funct ... + + version:160001 stmts:{stmt:{create_function_stmt:{is_procedure:true replace:true funcname:{string:{sval:"public"}} + funcname:{string:{sval:"add_employee"}} parameters:{function_parameter:{name:"emp_name" arg_type:{names:{string:{sval:"pg_catalog"}} + names:{string:{sval:"varchar"}} typemod:-1 location:63} mode:FUNC_PARAM_DEFAULT}} ... + */ + stmt := createFuncNode.CreateFunctionStmt + objectType := "FUNCTION" + if stmt.IsProcedure { + objectType = "PROCEDURE" + } + funcNameList := stmt.GetFuncname() + funcSchemaName, funcName := getSchemaAndObjectName(funcNameList) + return objectType, utils.BuildObjectName(funcSchemaName, funcName) + case isViewStmt: + viewName := viewNode.ViewStmt.View + return "VIEW", getObjectNameFromRangeVar(viewName) + case IsMviewObject(parseTree): + intoMview := createAsNode.CreateTableAsStmt.Into.Rel + return "MVIEW", getObjectNameFromRangeVar(intoMview) + case isCreateTable: + return "TABLE", getObjectNameFromRangeVar(createTableNode.CreateStmt.Relation) + case isAlterTable: + return "TABLE", getObjectNameFromRangeVar(alterTableNode.AlterTableStmt.Relation) + case isCreateIndex: + indexName := createIndexNode.IndexStmt.Idxname + schemaName := createIndexNode.IndexStmt.Relation.GetSchemaname() + tableName := createIndexNode.IndexStmt.Relation.GetRelname() + fullyQualifiedName := 
utils.BuildObjectName(schemaName, tableName) + displayObjName := fmt.Sprintf("%s ON %s", indexName, fullyQualifiedName) + return "INDEX", displayObjName + default: + panic("unsupported type of parseResult") + } +} + +func isArrayType(typeName *pg_query.TypeName) bool { + return len(typeName.GetArrayBounds()) > 0 +} + +// Range Var is the struct to get the relation information like relation name, schema name, persisted relation or not, etc.. +func getObjectNameFromRangeVar(obj *pg_query.RangeVar) string { + schema := obj.Schemaname + name := obj.Relname + return utils.BuildObjectName(schema, name) +} + +func getSchemaAndObjectName(nameList []*pg_query.Node) (string, string) { + objName := "" + schemaName := "" + if len(nameList) > 0 { + objName = nameList[len(nameList)-1].GetString_().Sval // obj name can be qualified / unqualifed or native / non-native proper func name will always be available at last index + } + if len(nameList) >= 2 { // Names list will have all the parts of qualified func name + schemaName = nameList[len(nameList)-2].GetString_().Sval // // obj name can be qualified / unqualifed or native / non-native proper schema name will always be available at last 2nd index + } + return schemaName, objName +} + +func getCreateTableAsStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_CreateTableAsStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateTableAsStmt) + return node, ok +} + +func getCreateViewNode(parseTree *pg_query.ParseResult) (*pg_query.Node_ViewStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_ViewStmt) + return node, ok +} + +func getCreateFuncStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_CreateFunctionStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateFunctionStmt) + return node, ok +} + +func getCreateTableStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_CreateStmt, bool) { + node, ok := 
parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateStmt) + return node, ok +} + +func getCreateIndexStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_IndexStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_IndexStmt) + return node, ok +} + +func getAlterStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_AlterTableStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_AlterTableStmt) + return node, ok +} + +func getCreateTriggerStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_CreateTrigStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateTrigStmt) + return node, ok +} + +func getPolicyStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_CreatePolicyStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreatePolicyStmt) + return node, ok +} + +func getCompositeTypeStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_CompositeTypeStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CompositeTypeStmt) + return node, ok +} + +func getEnumTypeStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_CreateEnumStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateEnumStmt) + return node, ok +} +func getForeignTableStmtNode(parseTree *pg_query.ParseResult) (*pg_query.Node_CreateForeignTableStmt, bool) { + node, ok := parseTree.Stmts[0].Stmt.Node.(*pg_query.Node_CreateForeignTableStmt) + return node, ok +} + +func IsFunctionObject(parseTree *pg_query.ParseResult) bool { + funcNode, ok := getCreateFuncStmtNode(parseTree) + if !ok { + return false + } + return !funcNode.CreateFunctionStmt.IsProcedure +} + +/* +return type ex- +CREATE OR REPLACE FUNCTION public.process_combined_tbl( + + ... 
+
+)
+RETURNS public.combined_tbl.maddr%TYPE AS
+return_type:{names:{string:{sval:"public"}} names:{string:{sval:"combined_tbl"}} names:{string:{sval:"maddr"}}
+pct_type:true typemod:-1 location:226}
+*/
+func GetReturnTypeOfFunc(parseTree *pg_query.ParseResult) string {
+	funcNode, _ := getCreateFuncStmtNode(parseTree)
+	returnType := funcNode.CreateFunctionStmt.GetReturnType()
+	return convertParserTypeNameToString(returnType)
+}
+
+func getQualifiedTypeName(typeNames []*pg_query.Node) string {
+	var typeNameStrings []string
+	for _, n := range typeNames {
+		typeNameStrings = append(typeNameStrings, n.GetString_().Sval)
+	}
+	return strings.Join(typeNameStrings, ".")
+}
+
+func convertParserTypeNameToString(typeVar *pg_query.TypeName) string {
+	if typeVar == nil {
+		return ""
+	}
+	typeNames := typeVar.GetNames()
+	finalTypeName := getQualifiedTypeName(typeNames) // type name can be qualified (table_name.column) in case of %TYPE
+	if typeVar.PctType { // %TYPE declaration, so adding %TYPE for using it further
+		return finalTypeName + "%TYPE"
+	}
+	return finalTypeName
+}
+
+/*
+function ex -
+CREATE OR REPLACE FUNCTION public.process_combined_tbl(
+
+	p_id int,
+	p_c public.combined_tbl.c%TYPE,
+	p_bitt public.combined_tbl.bitt%TYPE,
+	..
+ +) +parseTree- +parameters:{function_parameter:{name:"p_id" arg_type:{names:{string:{sval:"pg_catalog"}} names:{string:{sval:"int4"}} typemod:-1 location:66} +mode:FUNC_PARAM_DEFAULT}} parameters:{function_parameter:{name:"p_c" arg_type:{names:{string:{sval:"public"}} names:{string:{sval:"combined_tbl"}} +names:{string:{sval:"c"}} pct_type:true typemod:-1 location:87} mode:FUNC_PARAM_DEFAULT}} parameters:{function_parameter:{name:"p_bitt" +arg_type:{names:{string:{sval:"public"}} names:{string:{sval:"combined_tbl"}} names:{string:{sval:"bitt"}} pct_type:true typemod:-1 +location:136} mode:FUNC_PARAM_DEFAULT}} +*/ +func GetFuncParametersTypeNames(parseTree *pg_query.ParseResult) []string { + funcNode, _ := getCreateFuncStmtNode(parseTree) + parameters := funcNode.CreateFunctionStmt.GetParameters() + var paramTypeNames []string + for _, param := range parameters { + funcParam, ok := param.Node.(*pg_query.Node_FunctionParameter) + if ok { + paramType := funcParam.FunctionParameter.ArgType + paramTypeNames = append(paramTypeNames, convertParserTypeNameToString(paramType)) + } + } + return paramTypeNames +} + +func IsDDL(parseTree *pg_query.ParseResult) (bool, error) { + ddlParser, err := GetDDLProcessor(parseTree) + if err != nil { + return false, fmt.Errorf("error getting a ddl parser: %w", err) + } + _, ok := ddlParser.(*NoOpProcessor) + //Considering all the DDLs we have a Processor for as of now. 
+ //Not Full-proof as we don't have all DDL types but atleast we will skip all the types we know currently + return !ok, nil +} + +/* +this function checks whether the current node handles the jsonb data or not by evaluating all different type of nodes - +column ref - column of jsonb type +type cast - constant data with type casting to jsonb type +func call - function call returning the jsonb data +Expression - if any of left and right operands are of node type handling jsonb data +*/ +func DoesNodeHandleJsonbData(node *pg_query.Node, jsonbColumns []string, jsonbFunctions []string) bool { + switch { + case node.GetColumnRef() != nil: + /* + SELECT numbers[1] AS first_number + FROM array_data; + {a_indirection:{arg:{column_ref:{fields:{string:{sval:"numbers"}} location:69}} + indirection:{a_indices:{uidx:{a_const:{ival:{ival:1} location:77}}}}}} location:69}} + */ + _, col := GetColNameFromColumnRef(node.GetColumnRef().ProtoReflect()) + if slices.Contains(jsonbColumns, col) { + return true + } + + case node.GetTypeCast() != nil: + /* + SELECT ('{"a": {"b": {"c": 1}}}'::jsonb)['a']['b']['c']; + {a_indirection:{arg:{type_cast:{arg:{a_const:{sval:{sval:"{\"a\": {\"b\": {\"c\": 1}}}"} location:280}} + type_name:{names:{string:{sval:"jsonb"}} typemod:-1 location:306} location:304}} + */ + typeCast := node.GetTypeCast() + _, typeName := getSchemaAndObjectName(typeCast.GetTypeName().GetNames()) + if typeName == "jsonb" { + return true + } + case node.GetFuncCall() != nil: + /* + SELECT (jsonb_build_object('name', 'PostgreSQL', 'version', 14, 'open_source', TRUE))['name'] AS json_obj; + val:{a_indirection:{arg:{func_call:{funcname:{string:{sval:"jsonb_build_object"}} args:{a_const:{sval:{sval:"name"} + location:194}} args:{a_const:{sval:{sval:"PostgreSQL"} location:202}} args:{a_const:{sval:{sval:"version"} location:216}} + args:{a_const:{ival:{ival:14} location:227}} + */ + funcCall := node.GetFuncCall() + _, funcName := getSchemaAndObjectName(funcCall.Funcname) + if 
slices.Contains(jsonbFunctions, funcName) { + return true + } + case node.GetAExpr() != nil: + /* + SELECT ('{"key": "value1"}'::jsonb || '{"key1": "value2"}'::jsonb)['key'] AS object_in_array; + val:{a_indirection:{arg:{a_expr:{kind:AEXPR_OP name:{string:{sval:"||"}} lexpr:{type_cast:{arg:{a_const:{sval:{sval:"{\"key\": \"value1\"}"} + location:81}} type_name:{names:{string:{sval:"jsonb"}} typemod:-1 location:102} location:100}} rexpr:{type_cast:{arg:{a_const:{sval:{sval:"{\"key1\": \"value2\"}"} + location:111}} type_name:{names:{string:{sval:"jsonb"}} typemod:-1 location:132} location:130}} location:108}} indirection:{a_indices:{uidx:{a_const:{sval:{sval:"key"} + location:139}}}}}} + + SELECT (data || '{"new_key": "new_value"}' )['name'] FROM test_jsonb; + {val:{a_indirection:{arg:{a_expr:{kind:AEXPR_OP name:{string:{sval:"||"}} lexpr:{column_ref:{fields:{string:{sval:"data"}} location:10}} rexpr:{a_const:{sval:{sval:"{\"new_key\": \"new_value\"}"} + location:18}} location:15}} indirection:{a_indices:{uidx:{a_const:{sval:{sval:"name"} + + SELECT (jsonb_build_object('name', 'PostgreSQL', 'version', 14, 'open_source', TRUE) || '{"key": "value2"}')['name'] AS json_obj; + {val:{a_indirection:{arg:{a_expr:{kind:AEXPR_OP name:{string:{sval:"||"}} lexpr:{column_ref:{fields:{string:{sval:"data"}} location:10}} rexpr:{a_const:{sval:{sval:"{\"new_key\": \"new_value\"}"} + location:18}} location:15}} indirection:{a_indices:{uidx:{a_const:{sval:{sval:"name"} location:47}}}}}} location:9}} + */ + expr := node.GetAExpr() + lExpr := expr.GetLexpr() + rExpr := expr.GetRexpr() + if lExpr != nil && DoesNodeHandleJsonbData(lExpr, jsonbColumns, jsonbFunctions) { + return true + } + if rExpr != nil && DoesNodeHandleJsonbData(rExpr, jsonbColumns, jsonbFunctions) { + return true + } + } + return false +} diff --git a/yb-voyager/src/query/queryparser/object_collector.go b/yb-voyager/src/query/queryparser/object_collector.go new file mode 100644 index 0000000000..9a6020699b --- 
/dev/null +++ b/yb-voyager/src/query/queryparser/object_collector.go @@ -0,0 +1,142 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package queryparser + +import ( + "strings" + + "github.com/samber/lo" + log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" +) + +// ObjectPredicate defines a function signature that decides whether to include an object. +type ObjectPredicate func(schemaName string, objectName string, objectType string) bool + +// Using a predicate makes it easy to adapt or swap filtering logic in the future without changing the collector. + +// ObjectCollector collects unique schema-qualified object names based on the provided predicate. +type ObjectCollector struct { + objectSet map[string]bool + predicate ObjectPredicate +} + +// AllObjectsPredicate always returns true, meaning it will collect all objects found. +func AllObjectsPredicate(schemaName, objectName, objectType string) bool { + return true +} + +// TablesOnlyPredicate returns true only if the object type is "table". 
+func TablesOnlyPredicate(schemaName, objectName, objectType string) bool { + return objectType == constants.TABLE +} + +func NewObjectCollector(predicate ObjectPredicate) *ObjectCollector { + if predicate == nil { + predicate = AllObjectsPredicate + } + + return &ObjectCollector{ + objectSet: make(map[string]bool), + predicate: predicate, + } +} + +/* +Collect processes a given node and extracts object names based on node type. +Cases covered: +1. [DML] SELECT queries - collect table/function object in it +2. [DML]Insert/Update/Delete queries +3. TODO: Coverage for DDLs (right now it worked with some cases like CREATE VIEW and CREATE SEQUENCE, but need to ensure all cases are covered) + +Collect() should be called from TraverseParseTree() to get all the objects in the parse tree of a stmt +*/ +func (c *ObjectCollector) Collect(msg protoreflect.Message) { + if msg == nil || !msg.IsValid() { + return + } + + nodeType := GetMsgFullName(msg) + switch nodeType { + // Extract table or view names in FROM clauses + case PG_QUERY_RANGEVAR_NODE: + schemaName := GetStringField(msg, "schemaname") + relName := GetStringField(msg, "relname") + objectName := utils.BuildObjectName(schemaName, relName) + log.Debugf("[RangeVar] fetched schemaname=%s relname=%s objectname=%s field\n", schemaName, relName, objectName) + // it will be either table or view, considering objectType=table for both + if c.predicate(schemaName, relName, constants.TABLE) { + c.addObject(objectName) + } + + // Extract target table names from DML statements + case PG_QUERY_INSERTSTMT_NODE, PG_QUERY_UPDATESTMT_NODE, PG_QUERY_DELETESTMT_NODE: + relationMsg := GetMessageField(msg, "relation") + if relationMsg != nil { + schemaName := GetStringField(relationMsg, "schemaname") + relName := GetStringField(relationMsg, "relname") + objectName := utils.BuildObjectName(schemaName, relName) + log.Debugf("[IUD] fetched schemaname=%s relname=%s objectname=%s field\n", schemaName, relName, objectName) + if 
c.predicate(schemaName, relName, constants.TABLE) { + c.addObject(objectName) + } + } + + // Extract function names + case PG_QUERY_FUNCCALL_NODE: + schemaName, functionName := GetFuncNameFromFuncCall(msg) + if functionName == "" { + return + } + + objectName := utils.BuildObjectName(schemaName, functionName) + log.Debugf("[Funccall] fetched schemaname=%s objectname=%s field\n", schemaName, objectName) + if c.predicate(schemaName, functionName, constants.FUNCTION) { + c.addObject(objectName) + } + + // Add more cases as needed for other message types + } +} + +// addObject adds an object name to the collector if it's not already present. +func (c *ObjectCollector) addObject(objectName string) { + if _, exists := c.objectSet[objectName]; !exists { + log.Debugf("adding object to object collector set: %s", objectName) + c.objectSet[objectName] = true + } +} + +// GetObjects returns a slice of collected unique object names. +func (c *ObjectCollector) GetObjects() []string { + return lo.Keys(c.objectSet) +} + +func (c *ObjectCollector) GetSchemaList() []string { + var schemaList []string + for obj := range c.objectSet { + splits := strings.Split(obj, ".") + if len(splits) == 1 { + schemaList = append(schemaList, "") + } else if len(splits) == 2 { + schemaList = append(schemaList, splits[0]) + } + } + return lo.Uniq(schemaList) +} diff --git a/yb-voyager/src/query/queryparser/query_parser.go b/yb-voyager/src/query/queryparser/query_parser.go new file mode 100644 index 0000000000..864cb30d07 --- /dev/null +++ b/yb-voyager/src/query/queryparser/query_parser.go @@ -0,0 +1,68 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This package contains all the logic related to parsing a query string, and extracting details out of it. +We mainly use the pg_query_go library to help with this. + +The main functions in this package are: +1. Use pg_query_go to parse the query string into a ParseResult (i.e. a parseTree) +2. Traverse and process each protobufMessage node of the ParseTree. +3. For PLPGSQL, convert the PLPGSQL to JSON; get all the statements out of the PLPGSQL block. +we can put all the parser related logic (the parsing, the parsing of plpgsql to json, the traversal through the proto messages, the traversal through the nested plpgsql json, adding clauses to statements, etc +*/ +package queryparser + +import ( + "fmt" + + pg_query "github.com/pganalyze/pg_query_go/v6" + log "github.com/sirupsen/logrus" +) + +func Parse(query string) (*pg_query.ParseResult, error) { + log.Debugf("parsing the query [%s]", query) + tree, err := pg_query.Parse(query) + if err != nil { + return nil, err + } + log.Debugf("query: %s\n", query) + log.Debugf("parse tree: %v\n", tree) + return tree, nil +} + +func ParsePLPGSQLToJson(query string) (string, error) { + log.Debugf("parsing the PLPGSQL to json query [%s]", query) + jsonString, err := pg_query.ParsePlPgSqlToJSON(query) + if err != nil { + return "", err + } + return jsonString, err +} + +func ProcessDDL(parseTree *pg_query.ParseResult) (DDLObject, error) { + processor, err := GetDDLProcessor(parseTree) + if err != nil { + return nil, fmt.Errorf("getting processor failed: %v", err) + } + + ddlObject, err := processor.Process(parseTree) + if err 
!= nil { + return nil, fmt.Errorf("parsing DDL failed: %v", err) + } + + return ddlObject, nil +} diff --git a/yb-voyager/src/query/queryparser/traversal_plpgsql.go b/yb-voyager/src/query/queryparser/traversal_plpgsql.go new file mode 100644 index 0000000000..bc6a3c419e --- /dev/null +++ b/yb-voyager/src/query/queryparser/traversal_plpgsql.go @@ -0,0 +1,409 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package queryparser + +import ( + "encoding/json" + "fmt" + "strings" + + log "github.com/sirupsen/logrus" +) + +const ( + PLPGSQL_EXPR = "PLpgSQL_expr" + QUERY = "query" + + ACTION = "action" + DATUMS = "datums" + PLPGSQL_VAR = "PLpgSQL_var" + DATATYPE = "datatype" + TYPENAME = "typname" + PLPGSQL_TYPE = "PLpgSQL_type" + PLPGSQL_FUNCTION = "PLpgSQL_function" +) + +/* +* +This function is not concrete yet because of following limitation from parser - +The following queries are not the actual query we need so if we pass all these queries directly to parser again to detect the unsupported feature/construct. It will fail for some of these with syntax error, e.g. + + a. query "balance > 0 AND balance < withdrawal;" error - syntax error at or near "balance" + b. query "format(' + CREATE TABLE IF NOT EXISTS %I ( + id SERIAL PRIMARY KEY, + name TEXT, + amount NUMERIC + )', partition_table);" error - syntax error at or near "format" + c. 
query "(SELECT balance FROM accounts WHERE account_id = sender_id) < transfer_amount;" error - syntax error at or near "<" + +These issues are majorly expressions, conditions, assignments, loop variables, raise parameters, etc… and the parser is giving all these as queries so we can’t differentiate as such between actual query and these. +* +*/ +func GetAllPLPGSQLStatements(query string) ([]string, error) { + parsedJson, parsedJsonMap, err := getParsedJsonMap(query) + if err != nil { + return []string{}, err + } + + function := parsedJsonMap[PLPGSQL_FUNCTION] + parsedFunctionMap, ok := function.(map[string]interface{}) + if !ok { + return []string{}, fmt.Errorf("the PlPgSQL_Function field is not a map in parsed json-%s", parsedJson) + } + + actions := parsedFunctionMap[ACTION] + var plPgSqlStatements []string + TraversePlPgSQLJson(actions, &plPgSqlStatements) + return plPgSqlStatements, nil +} + +/* +Query example- + + CREATE OR REPLACE FUNCTION func_example_advi( + sender_id INT, + receiver_id INT, + transfer_amount NUMERIC + +) +RETURNS VOID +LANGUAGE plpgsql +AS $$ +BEGIN + + -- Acquire advisory locks to prevent concurrent updates on the same accounts + PERFORM pg_advisory_lock(sender_id); + PERFORM pg_advisory_lock(receiver_id); + + -- Deduct the amount from the sender's account + UPDATE accounts + SET balance = balance - transfer_amount + WHERE account_id = sender_id; + + -- Add the amount to the receiver's account + UPDATE accounts + SET balance = balance + transfer_amount + WHERE account_id = receiver_id; + + -- Release the advisory locks + PERFORM pg_advisory_unlock(sender_id); + PERFORM pg_advisory_unlock(receiver_id); + +END; +$$; + +parsed json - + + { + "PLpgSQL_function": { + "datums": [ + ... + ], + "action": { + "PLpgSQL_stmt_block": { + "lineno": 2, + "body": [ + { + "PLpgSQL_stmt_perform": { + "lineno": 4, + "expr": { + "PLpgSQL_expr": { + "query": "SELECT pg_advisory_lock(sender_id)", + "parseMode": 0 + } + } + } + }, + { + ... 
similar to above + }, + { + "PLpgSQL_stmt_execsql": { + "lineno": 8, + "sqlstmt": { + "PLpgSQL_expr": { + "query": "UPDATE accounts \n SET balance = balance - transfer_amount \n WHERE account_id = sender_id", + "parseMode": 0 + } + } + } + }, + { + .... similar to above + }, + { + ... similar to below + }, + { + "PLpgSQL_stmt_perform": { + "lineno": 19, + "expr": { + "PLpgSQL_expr": { + "query": "SELECT pg_advisory_unlock(receiver_id)", + "parseMode": 0 + } + } + } + }, + { + "PLpgSQL_stmt_return": {} + } + ] + } + } + } + } +*/ +func TraversePlPgSQLJson(fieldValue interface{}, plPgSqlStatements *[]string) { + fieldMap, isMap := fieldValue.(map[string]interface{}) + fieldList, isList := fieldValue.([]interface{}) + switch true { + case isMap: + for k, v := range fieldMap { + switch k { + // base case of recursive calls to reach this PLPGSQL_EXPR field in json which will have "query" field with statement + case PLPGSQL_EXPR: + expr, ok := v.(map[string]interface{}) + if ok { + query, ok := expr[QUERY] + if ok { + q := formatExprQuery(query.(string)) // formating the query of parsed json if required + + *plPgSqlStatements = append(*plPgSqlStatements, q) + } + } + default: + TraversePlPgSQLJson(v, plPgSqlStatements) + } + } + case isList: + //In case the value of a field is not a but a list of e.g. 
"body" + for _, l := range fieldList { + TraversePlPgSQLJson(l, plPgSqlStatements) + } + } + +} + +// Function to format the PLPGSQL EXPR query from the json string +func formatExprQuery(q string) string { + /* + PLPGSQL line - + EXECUTE 'DROP TABLE IF EXISTS employees'; + + json str - + "PLpgSQL_expr": { + "query": "'DROP TABLE IF EXISTS employees'", + */ + q = strings.Trim(q, "'") //to remove any extra '' around the statement + q = strings.TrimSpace(q) + if !strings.HasSuffix(q, ";") { // adding the ; to query in case not added already + q += ";" + } + return q +} + +func getParsedJsonMap(query string) (string, map[string]interface{}, error) { + parsedJson, err := ParsePLPGSQLToJson(query) + if err != nil { + log.Infof("error in parsing the stmt-%s to json: %v", query, err) + return parsedJson, nil, err + } + if parsedJson == "" { + return "", nil, nil + } + var parsedJsonMapList []map[string]interface{} + //Refer to the queryparser.traversal_plpgsql.go for example and sample parsed json + log.Debugf("parsing the json string-%s of stmt-%s", parsedJson, query) + err = json.Unmarshal([]byte(parsedJson), &parsedJsonMapList) + if err != nil { + return parsedJson, nil, fmt.Errorf("error parsing the json string of stmt-%s: %v", query, err) + } + + if len(parsedJsonMapList) == 0 { + return parsedJson, nil, nil + } + + return parsedJson, parsedJsonMapList[0], nil +} + +/* +example - +CREATE FUNCTION public.get_employeee_salary(emp_id employees.employee_id%TYPE) RETURNS employees.salary%Type + + LANGUAGE plpgsql + AS $$ + +DECLARE + + emp_salary employees.salary%TYPE; + +BEGIN + + SELECT salary INTO emp_salary + FROM employees + WHERE employee_id = emp_id; + RETURN emp_salary; + +END; +$$; +[ + + { + "PLpgSQL_function": { + "datums": [ + { + "PLpgSQL_var": { + "refname": "emp_id", + "datatype": { + "PLpgSQL_type": { + "typname": "UNKNOWN" + } + } + } + }, + { + "PLpgSQL_var": { + "refname": "found", + "datatype": { + "PLpgSQL_type": { + "typname": "UNKNOWN" + } + } + } + 
},
+        {
+          "PLpgSQL_var": {
+            "refname": "emp_salary",
+            "lineno": 3,
+            "datatype": {
+              "PLpgSQL_type": {
+                "typname": "employees.salary%TYPE"
+              }
+            }
+          }
+        },
+        {
+          "PLpgSQL_row": {
+            "refname": "(unnamed row)",
+            "lineno": 5,
+            "fields": [
+              {
+                "name": "emp_salary",
+                "varno": 2
+              }
+            ]
+          }
+        }
+      ],"action": {
+        ....
+      }
+    }
+  },
+
+	Caveats:
+	1. Not returning typename for variables in function parameter from this function (incorrect in json as UNKNOWN), for that using the GetTypeNamesFromFuncParameters()
+	2. Not returning the return type from this function (not available in json), for that using the GetReturnTypeOfFunc()
+*/
+func GetAllTypeNamesInPlpgSQLStmt(query string) ([]string, error) {
+	parsedJson, parsedJsonMap, err := getParsedJsonMap(query)
+	if err != nil {
+		return []string{}, err // propagate parse error (was swallowed as nil); consistent with GetAllPLPGSQLStatements()
+	}
+	function := parsedJsonMap[PLPGSQL_FUNCTION]
+	parsedFunctionMap, ok := function.(map[string]interface{})
+	if !ok {
+		return []string{}, fmt.Errorf("the PlPgSQL_Function field is not a map in parsed json-%s", parsedJson)
+	}
+
+	datums := parsedFunctionMap[DATUMS]
+	datumList, isList := datums.([]interface{})
+	if !isList {
+		return []string{}, fmt.Errorf("type names datums field is not list in parsed json-%s", parsedJson)
+	}
+
+	var typeNames []string
+	for _, datum := range datumList {
+		datumMap, ok := datum.(map[string]interface{})
+		if !ok {
+			log.Errorf("datum is not a map-%v", datum)
+			continue
+		}
+		for key, val := range datumMap {
+			switch key {
+			case PLPGSQL_VAR:
+				typeName, err := getTypeNameFromPlpgSQLVar(val)
+				if err != nil {
+					log.Errorf("not able to get typename from PLPGSQL_VAR(%v): %v", val, err)
+					continue
+				}
+				typeNames = append(typeNames, typeName)
+			}
+		}
+	}
+	return typeNames, nil
+}
+
+/*
+example of PLPGSQL_VAR -
+
+	"PLpgSQL_var": {
+		"refname": "tax_rate",
+		"lineno": 3,
+		"datatype": {
+			"PLpgSQL_type": {
+				"typname": "employees.tax_rate%TYPE"
+			}
+		}
+	}
+*/
+func getTypeNameFromPlpgSQLVar(plpgsqlVar interface{}) (string, error) {
+	//getting the map of PLpgSQL_Var json
+	valueMap, ok := plpgsqlVar.(map[string]interface{})
+	if !ok {
+		return "", fmt.Errorf("PLPGSQL_VAR is not a map-%v", plpgsqlVar)
+	}
+
+	//getting the "datatype" field of PLpgSQL_Var json
+	datatype, ok := valueMap[DATATYPE]
+	if !ok {
+		return "", fmt.Errorf("datatype is not in the PLPGSQL_VAR map-%v", valueMap)
+	}
+
+	datatypeValueMap, ok := datatype.(map[string]interface{})
+	if !ok {
+		return "", fmt.Errorf("datatype is not a map-%v", datatype)
+	}
+
+	plpgsqlType, ok := datatypeValueMap[PLPGSQL_TYPE]
+	if !ok {
+		return "", fmt.Errorf("PLPGSQL_Type is not in the datatype map-%v", datatypeValueMap)
+	}
+
+	typeValueMap, ok := plpgsqlType.(map[string]interface{})
+	if !ok {
+		return "", fmt.Errorf("PLPGSQL_Type is not a map-%v", plpgsqlType)
+	}
+
+	typeName, ok := typeValueMap[TYPENAME]
+	if !ok {
+		return "", fmt.Errorf("typname is not in the PLPGSQL_Type map-%v", typeValueMap)
+	}
+
+	return typeName.(string), nil
+
+}
diff --git a/yb-voyager/src/queryparser/traversal.go b/yb-voyager/src/query/queryparser/traversal_proto.go
similarity index 67%
rename from yb-voyager/src/queryparser/traversal.go
rename to yb-voyager/src/query/queryparser/traversal_proto.go
index 9c231bd2d2..09b2658ac7 100644
--- a/yb-voyager/src/queryparser/traversal.go
+++ b/yb-voyager/src/query/queryparser/traversal_proto.go
@@ -1,3 +1,18 @@
+/*
+Copyright (c) YugabyteDB, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ package queryparser import ( @@ -9,12 +24,40 @@ import ( ) const ( - PG_QUERY_NODE_NODE = "pg_query.Node" - PG_QUERY_STRING_NODE = "pg_query.String" - PG_QUERY_ASTAR_NODE = "pg_query.A_Star" - PG_QUERY_XMLEXPR_NODE = "pg_query.XmlExpr" - PG_QUERY_FUNCCALL_NODE = "pg_query.FuncCall" - PG_QUERY_COLUMNREF_NODE = "pg_query.ColumnRef" + PG_QUERY_NODE_NODE = "pg_query.Node" + PG_QUERY_STRING_NODE = "pg_query.String" + PG_QUERY_ASTAR_NODE = "pg_query.A_Star" + PG_QUERY_ACONST_NODE = "pg_query.A_Const" + PG_QUERY_TYPECAST_NODE = "pg_query.TypeCast" + PG_QUERY_XMLEXPR_NODE = "pg_query.XmlExpr" + PG_QUERY_FUNCCALL_NODE = "pg_query.FuncCall" + PG_QUERY_COLUMNREF_NODE = "pg_query.ColumnRef" + PG_QUERY_RANGEFUNCTION_NODE = "pg_query.RangeFunction" + PG_QUERY_RANGEVAR_NODE = "pg_query.RangeVar" + PG_QUERY_RANGETABLEFUNC_NODE = "pg_query.RangeTableFunc" + PG_QUERY_PARAMREF_NODE = "pg_query.ParamRef" + PG_QUERY_DEFELEM_NODE = "pg_query.DefElem" + + PG_QUERY_INSERTSTMT_NODE = "pg_query.InsertStmt" + PG_QUERY_UPDATESTMT_NODE = "pg_query.UpdateStmt" + PG_QUERY_DELETESTMT_NODE = "pg_query.DeleteStmt" + PG_QUERY_SELECTSTMT_NODE = "pg_query.SelectStmt" + + PG_QUERY_A_INDIRECTION_NODE = "pg_query.A_Indirection" + PG_QUERY_JSON_OBJECT_AGG_NODE = "pg_query.JsonObjectAgg" + PG_QUERY_JSON_ARRAY_AGG_NODE = "pg_query.JsonArrayAgg" + PG_QUERY_JSON_ARRAY_CONSTRUCTOR_AGG_NODE = "pg_query.JsonArrayConstructor" + PG_QUERY_JSON_FUNC_EXPR_NODE = "pg_query.JsonFuncExpr" + PG_QUERY_JSON_OBJECT_CONSTRUCTOR_NODE = "pg_query.JsonObjectConstructor" + PG_QUERY_JSON_TABLE_NODE = "pg_query.JsonTable" + PG_QUERY_JSON_IS_PREDICATE_NODE = "pg_query.JsonIsPredicate" + PG_QUERY_VIEW_STMT_NODE = "pg_query.ViewStmt" + PG_QUERY_COPY_STMT_NODE = "pg_query.CopyStmt" + + PG_QUERY_DEFINE_STMT_NODE = "pg_query.DefineStmt" + PG_QUERY_MERGE_STMT_NODE = "pg_query.MergeStmt" + PG_QUERY_CONSTRAINT_NODE = "pg_query.Constraint" + PG_QUERY_INDEX_STMT_NODE = "pg_query.IndexStmt" ) // function type for processing nodes during 
traversal @@ -97,7 +140,7 @@ func TraverseParseTree(msg protoreflect.Message, visited map[protoreflect.Messag // Reference Oneof - https://protobuf.dev/programming-guides/proto3/#oneof if nodeType == PG_QUERY_NODE_NODE { - nodeField := msg.WhichOneof(msg.Descriptor().Oneofs().ByName("node")) + nodeField := getOneofActiveField(msg, "node") if nodeField != nil { value := msg.Get(nodeField) if value.IsValid() { @@ -185,3 +228,23 @@ func IsScalarKind(kind protoreflect.Kind) bool { protoreflect.BoolKind, protoreflect.StringKind, protoreflect.BytesKind, protoreflect.EnumKind} return slices.Contains(listOfScalarKinds, kind) } + +func GetSchemaUsed(query string) ([]string, error) { + parseTree, err := Parse(query) + if err != nil { + return nil, fmt.Errorf("error parsing query: %w", err) + } + + msg := GetProtoMessageFromParseTree(parseTree) + visited := make(map[protoreflect.Message]bool) + objectCollector := NewObjectCollector(TablesOnlyPredicate) + err = TraverseParseTree(msg, visited, func(msg protoreflect.Message) error { + objectCollector.Collect(msg) + return nil + }) + if err != nil { + return nil, fmt.Errorf("traversing parse tree: %w", err) + } + + return objectCollector.GetSchemaList(), nil +} diff --git a/yb-voyager/src/query/queryparser/traversal_test.go b/yb-voyager/src/query/queryparser/traversal_test.go new file mode 100644 index 0000000000..6e65579719 --- /dev/null +++ b/yb-voyager/src/query/queryparser/traversal_test.go @@ -0,0 +1,367 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package queryparser + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Test ObjectCollector with default predicate(AllObjectsPredicate) +func TestObjectCollector(t *testing.T) { + tests := []struct { + Sql string + ExpectedObjects []string + ExpectedSchemas []string + }{ + { + Sql: `SELECT * from public.employees`, + ExpectedObjects: []string{"public.employees"}, + ExpectedSchemas: []string{"public"}, + }, + { + Sql: `SELECT * from employees`, + ExpectedObjects: []string{"employees"}, + ExpectedSchemas: []string{""}, // empty schemaname indicates unqualified objectname + }, + { + Sql: `SELECT * from s1.employees`, + ExpectedObjects: []string{"s1.employees"}, + ExpectedSchemas: []string{"s1"}, + }, + { + Sql: `SELECT * from s2.employees where salary > (Select salary from s3.employees)`, + ExpectedObjects: []string{"s3.employees", "s2.employees"}, + ExpectedSchemas: []string{"s3", "s2"}, + }, + { + Sql: `SELECT c.name, SUM(o.amount) AS total_spent FROM sales.customers c JOIN finance.orders o ON c.id = o.customer_id GROUP BY c.name HAVING SUM(o.amount) > 1000`, + ExpectedObjects: []string{"sales.customers", "finance.orders", "sum"}, + ExpectedSchemas: []string{"sales", "finance", ""}, + }, + { + Sql: `SELECT name FROM hr.employees WHERE department_id IN (SELECT id FROM public.departments WHERE location_id IN (SELECT id FROM eng.locations WHERE country = 'USA'))`, + ExpectedObjects: []string{"hr.employees", "public.departments", "eng.locations"}, + ExpectedSchemas: []string{"hr", "public", "eng"}, + }, + { + Sql: `SELECT name FROM sales.customers UNION SELECT name FROM marketing.customers;`, + ExpectedObjects: []string{"sales.customers", "marketing.customers"}, + ExpectedSchemas: []string{"sales", "marketing"}, + }, + { + Sql: `CREATE VIEW analytics.top_customers AS + SELECT user_id, 
COUNT(*) as order_count FROM public.orders GROUP BY user_id HAVING COUNT(*) > 10;`, + ExpectedObjects: []string{"analytics.top_customers", "public.orders", "count"}, + ExpectedSchemas: []string{"analytics", "public", ""}, // "" -> unknown schemaName + }, + { + Sql: `WITH user_orders AS ( + SELECT u.id, o.id as order_id + FROM users u + JOIN orders o ON u.id = o.user_id + WHERE o.amount > 100 + ), order_items AS ( + SELECT o.order_id, i.product_id + FROM order_items i + JOIN user_orders o ON i.order_id = o.order_id + ) + SELECT p.name, COUNT(oi.product_id) FROM products p + JOIN order_items oi ON p.id = oi.product_id GROUP BY p.name;`, + ExpectedObjects: []string{"users", "orders", "order_items", "user_orders", "products", "count"}, + ExpectedSchemas: []string{""}, + }, + { + Sql: `UPDATE finance.accounts + SET balance = balance + 1000 + WHERE account_id IN ( + SELECT account_id FROM public.users WHERE active = true + );`, + ExpectedObjects: []string{"finance.accounts", "public.users"}, + ExpectedSchemas: []string{"finance", "public"}, + }, + { + Sql: `SELECT classid, objid, refobjid FROM pg_depend WHERE refclassid = $1::regclass AND deptype = $2 ORDER BY 3`, + ExpectedObjects: []string{"pg_depend"}, + ExpectedSchemas: []string{""}, + }, + { + Sql: `SELECT pg_advisory_unlock_shared(100);`, + ExpectedObjects: []string{"pg_advisory_unlock_shared"}, + ExpectedSchemas: []string{""}, + }, + { + Sql: `SELECT xpath_exists('/employee/name', 'John'::xml)`, + ExpectedObjects: []string{"xpath_exists"}, + ExpectedSchemas: []string{""}, + }, + } + + for _, tc := range tests { + parseResult, err := Parse(tc.Sql) + assert.NoError(t, err) + + objectsCollector := NewObjectCollector(nil) + processor := func(msg protoreflect.Message) error { + objectsCollector.Collect(msg) + return nil + } + + visited := make(map[protoreflect.Message]bool) + parseTreeMsg := GetProtoMessageFromParseTree(parseResult) + err = TraverseParseTree(parseTreeMsg, visited, processor) + assert.NoError(t, err) + 
+ collectedObjects := objectsCollector.GetObjects() + collectedSchemas := objectsCollector.GetSchemaList() + + assert.ElementsMatch(t, tc.ExpectedObjects, collectedObjects, + "Objects list mismatch for sql [%s]. Expected: %v(len=%d), Actual: %v(len=%d)", tc.Sql, tc.ExpectedObjects, len(tc.ExpectedObjects), collectedObjects, len(collectedObjects)) + assert.ElementsMatch(t, tc.ExpectedSchemas, collectedSchemas, + "Schema list mismatch for sql [%s]. Expected: %v(len=%d), Actual: %v(len=%d)", tc.Sql, tc.ExpectedSchemas, len(tc.ExpectedSchemas), collectedSchemas, len(collectedSchemas)) + } +} + +// Test ObjectCollector with default predicate(AllObjectsPredicate) +// test focussed on collecting function names from DMLs +func TestObjectCollector2(t *testing.T) { + tests := []struct { + Sql string + ExpectedObjects []string + ExpectedSchemas []string + }{ + { + Sql: `SELECT finance.calculate_tax(amount) AS tax, name FROM sales.transactions;`, + ExpectedObjects: []string{"finance.calculate_tax", "sales.transactions"}, + ExpectedSchemas: []string{"finance", "sales"}, + }, + { + Sql: `SELECT hr.get_employee_details(e.id) FROM hr.employees e;`, + ExpectedObjects: []string{"hr.get_employee_details", "hr.employees"}, + ExpectedSchemas: []string{"hr"}, + }, + { + Sql: `Select now();`, + ExpectedObjects: []string{"now"}, + ExpectedSchemas: []string{""}, + }, + { // nested functions + Sql: `SELECT finance.calculate_bonus(sum(salary)) FROM hr.employees;`, + ExpectedObjects: []string{"finance.calculate_bonus", "sum", "hr.employees"}, + ExpectedSchemas: []string{"finance", "", "hr"}, + }, + { // functions as arguments in expressions + Sql: `SELECT e.name, CASE + WHEN e.salary > finance.calculate_bonus(e.salary) THEN 'High' + ELSE 'Low' + END AS salary_grade + FROM hr.employees e;`, + ExpectedObjects: []string{"finance.calculate_bonus", "hr.employees"}, + ExpectedSchemas: []string{"finance", "hr"}, + }, + { + Sql: `CREATE SEQUENCE finance.invoice_seq START 1000;`, + ExpectedObjects: 
[]string{"finance.invoice_seq"}, + ExpectedSchemas: []string{"finance"}, + }, + { + Sql: `SELECT department, + MAX(CASE WHEN month = 'January' THEN sales ELSE 0 END) AS January_Sales, + MAX(CASE WHEN month = 'February' THEN sales ELSE 0 END) AS February_Sales + FROM sales_data + GROUP BY department;`, + ExpectedObjects: []string{"max", "sales_data"}, + ExpectedSchemas: []string{""}, + }, + { // quoted mixed case + Sql: `SELECT * FROM "SALES_DATA"."Order_Details";`, + ExpectedObjects: []string{"SALES_DATA.Order_Details"}, + ExpectedSchemas: []string{"SALES_DATA"}, + }, + { + Sql: `SELECT * FROM myfunc(analytics.calculate_metrics(2024)) AS cm(metrics);`, + ExpectedObjects: []string{"myfunc", "analytics.calculate_metrics"}, + ExpectedSchemas: []string{"", "analytics"}, + }, + { + Sql: `COPY (SELECT id, xmlagg(xmlparse(document xml_column)) AS combined_xml + FROM my_table + GROUP BY id) + TO '/path/to/file.csv' WITH CSV;`, + ExpectedObjects: []string{"xmlagg", "my_table"}, + ExpectedSchemas: []string{""}, + }, + { + Sql: `COPY (SELECT ctid, xmin, id, data FROM schema_name.my_table) + TO '/path/to/file_with_system_columns.csv' WITH CSV;`, + ExpectedObjects: []string{"schema_name.my_table"}, + ExpectedSchemas: []string{"schema_name"}, + }, + } + + for _, tc := range tests { + parseResult, err := Parse(tc.Sql) + assert.NoError(t, err) + + objectsCollector := NewObjectCollector(nil) + processor := func(msg protoreflect.Message) error { + objectsCollector.Collect(msg) + return nil + } + + visited := make(map[protoreflect.Message]bool) + parseTreeMsg := GetProtoMessageFromParseTree(parseResult) + err = TraverseParseTree(parseTreeMsg, visited, processor) + assert.NoError(t, err) + + collectedObjects := objectsCollector.GetObjects() + collectedSchemas := objectsCollector.GetSchemaList() + + assert.ElementsMatch(t, tc.ExpectedObjects, collectedObjects, + "Objects list mismatch for sql [%s]. 
Expected: %v(len=%d), Actual: %v(len=%d)", tc.Sql, tc.ExpectedObjects, len(tc.ExpectedObjects), collectedObjects, len(collectedObjects)) + assert.ElementsMatch(t, tc.ExpectedSchemas, collectedSchemas, + "Schema list mismatch for sql [%s]. Expected: %v(len=%d), Actual: %v(len=%d)", tc.Sql, tc.ExpectedSchemas, len(tc.ExpectedSchemas), collectedSchemas, len(collectedSchemas)) + } +} + +// Test ObjectCollector with TablesOnlyPredicate +func TestTableObjectCollector(t *testing.T) { + tests := []struct { + Sql string + ExpectedObjects []string + ExpectedSchemas []string + }{ + { + Sql: `SELECT * from public.employees`, + ExpectedObjects: []string{"public.employees"}, + ExpectedSchemas: []string{"public"}, + }, + { + Sql: `SELECT * from employees`, + ExpectedObjects: []string{"employees"}, + ExpectedSchemas: []string{""}, // empty schemaname indicates unqualified objectname + }, + { + Sql: `SELECT * from s1.employees`, + ExpectedObjects: []string{"s1.employees"}, + ExpectedSchemas: []string{"s1"}, + }, + { + Sql: `SELECT * from s2.employees where salary > (Select salary from s3.employees)`, + ExpectedObjects: []string{"s3.employees", "s2.employees"}, + ExpectedSchemas: []string{"s3", "s2"}, + }, + { + Sql: `SELECT c.name, SUM(o.amount) AS total_spent FROM sales.customers c JOIN finance.orders o ON c.id = o.customer_id GROUP BY c.name HAVING SUM(o.amount) > 1000`, + ExpectedObjects: []string{"sales.customers", "finance.orders"}, + ExpectedSchemas: []string{"sales", "finance"}, + }, + { + Sql: `SELECT name FROM hr.employees WHERE department_id IN (SELECT id FROM public.departments WHERE location_id IN (SELECT id FROM eng.locations WHERE country = 'USA'))`, + ExpectedObjects: []string{"hr.employees", "public.departments", "eng.locations"}, + ExpectedSchemas: []string{"hr", "public", "eng"}, + }, + { + Sql: `SELECT name FROM sales.customers UNION SELECT name FROM marketing.customers;`, + ExpectedObjects: []string{"sales.customers", "marketing.customers"}, + ExpectedSchemas: 
[]string{"sales", "marketing"}, + }, + { + Sql: `CREATE VIEW analytics.top_customers AS + SELECT user_id, COUNT(*) as order_count FROM public.orders GROUP BY user_id HAVING COUNT(*) > 10;`, + ExpectedObjects: []string{"analytics.top_customers", "public.orders"}, + ExpectedSchemas: []string{"analytics", "public"}, + }, + { + Sql: `WITH user_orders AS ( + SELECT u.id, o.id as order_id + FROM users u + JOIN orders o ON u.id = o.user_id + WHERE o.amount > 100 + ), order_items AS ( + SELECT o.order_id, i.product_id + FROM order_items i + JOIN user_orders o ON i.order_id = o.order_id + ) + SELECT p.name, COUNT(oi.product_id) FROM products p + JOIN order_items oi ON p.id = oi.product_id GROUP BY p.name;`, + ExpectedObjects: []string{"users", "orders", "order_items", "user_orders", "products"}, + ExpectedSchemas: []string{""}, + }, + { + Sql: `UPDATE finance.accounts + SET balance = balance + 1000 + WHERE account_id IN ( + SELECT account_id FROM public.users WHERE active = true + );`, + ExpectedObjects: []string{"finance.accounts", "public.users"}, + ExpectedSchemas: []string{"finance", "public"}, + }, + { + Sql: `SELECT classid, objid, refobjid FROM pg_depend WHERE refclassid = $1::regclass AND deptype = $2 ORDER BY 3`, + ExpectedObjects: []string{"pg_depend"}, + ExpectedSchemas: []string{""}, + }, + { + Sql: `SELECT pg_advisory_unlock_shared(100);`, + ExpectedObjects: []string{}, // empty slice represents no object collected + ExpectedSchemas: []string{}, + }, + { + Sql: `SELECT xpath_exists('/employee/name', 'John'::xml)`, + ExpectedObjects: []string{}, + ExpectedSchemas: []string{}, + }, + { + Sql: `SELECT t.id, + xpath('/root/node', xmlparse(document t.xml_column)) AS extracted_nodes, + s2.some_function() + FROM s1.some_table t + WHERE t.id IN (SELECT id FROM s2.some_function()) + AND pg_advisory_lock(t.id);`, + ExpectedObjects: []string{"s1.some_table"}, + ExpectedSchemas: []string{"s1"}, + }, + } + + for _, tc := range tests { + parseResult, err := Parse(tc.Sql) + 
assert.NoError(t, err) + + objectsCollector := NewObjectCollector(TablesOnlyPredicate) + processor := func(msg protoreflect.Message) error { + objectsCollector.Collect(msg) + return nil + } + + visited := make(map[protoreflect.Message]bool) + parseTreeMsg := GetProtoMessageFromParseTree(parseResult) + err = TraverseParseTree(parseTreeMsg, visited, processor) + assert.NoError(t, err) + + collectedObjects := objectsCollector.GetObjects() + collectedSchemas := objectsCollector.GetSchemaList() + + assert.ElementsMatch(t, tc.ExpectedObjects, collectedObjects, + "Objects list mismatch for sql [%s]. Expected: %v(len=%d), Actual: %v(len=%d)", tc.Sql, tc.ExpectedObjects, len(tc.ExpectedObjects), collectedObjects, len(collectedObjects)) + assert.ElementsMatch(t, tc.ExpectedSchemas, collectedSchemas, + "Schema list mismatch for sql [%s]. Expected: %v(len=%d), Actual: %v(len=%d)", tc.Sql, tc.ExpectedSchemas, len(tc.ExpectedSchemas), collectedSchemas, len(collectedSchemas)) + } +} diff --git a/yb-voyager/src/queryissue/helpers.go b/yb-voyager/src/queryissue/helpers.go deleted file mode 100644 index 849cd15b57..0000000000 --- a/yb-voyager/src/queryissue/helpers.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright (c) YugabyteDB, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package queryissue - -// Refer: https://www.postgresql.org/docs/current/functions-admin.html#FUNCTIONS-ADVISORY-LOCKS -var unsupportedAdvLockFuncs = []string{ - "pg_advisory_lock", "pg_advisory_lock_shared", - "pg_advisory_unlock", "pg_advisory_unlock_all", "pg_advisory_unlock_shared", - "pg_advisory_xact_lock", "pg_advisory_xact_lock_shared", - "pg_try_advisory_lock", "pg_try_advisory_lock_shared", - "pg_try_advisory_xact_lock", "pg_try_advisory_xact_lock_shared", -} - -var unsupportedSysCols = []string{ - "xmin", "xmax", "cmin", "cmax", "ctid", -} - -// Refer: https://www.postgresql.org/docs/17/functions-xml.html#FUNCTIONS-XML-PROCESSING -var unsupportedXmlFunctions = []string{ - // 1. Producing XML content - "xmltext", "xmlcomment", "xmlconcat", "xmlelement", "xmlforest", - "xmlpi", "xmlroot", "xmlagg", - // 2. XML predicates - "xml", "xmlexists", "xml_is_well_formed", "xml_is_well_formed_document", - "xml_is_well_formed_content", - // 3. Processing XML - "xpath", "xpath_exists", "xmltable", - // 4. Mapping Table to XML - "table_to_xml", "table_to_xmlschema", "table_to_xml_and_xmlschema", - "cursor_to_xmlschema", "cursor_to_xml", - "query_to_xmlschema", "query_to_xml", "query_to_xml_and_xmlschema", - "schema_to_xml", "schema_to_xmlschema", "schema_to_xml_and_xmlschema", - "database_to_xml", "database_to_xmlschema", "database_to_xml_and_xmlschema", - - /* - 5. extras - not in ref doc but exists - SELECT proname FROM pg_proc - WHERE prorettype = 'xml'::regtype; - */ - "xmlconcat2", "xmlvalidate", "xml_in", "xml_out", "xml_recv", "xml_send", // System XML I/O -} diff --git a/yb-voyager/src/queryissue/queryissue.go b/yb-voyager/src/queryissue/queryissue.go deleted file mode 100644 index 7154fffeb6..0000000000 --- a/yb-voyager/src/queryissue/queryissue.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright (c) YugabyteDB, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package queryissue - -import ( - "fmt" - - pg_query "github.com/pganalyze/pg_query_go/v5" - "github.com/samber/lo" - log "github.com/sirupsen/logrus" - "github.com/yugabyte/yb-voyager/yb-voyager/src/issue" - "github.com/yugabyte/yb-voyager/yb-voyager/src/queryparser" - "google.golang.org/protobuf/reflect/protoreflect" -) - -type ParserIssueDetector struct { - // TODO: Add fields here - // e.g. store composite types, etc. for future processing. -} - -func NewParserIssueDetector() *ParserIssueDetector { - return &ParserIssueDetector{} -} - -func (p *ParserIssueDetector) GetIssues(query string) ([]issue.IssueInstance, error) { - parseTree, err := queryparser.Parse(query) - if err != nil { - return nil, fmt.Errorf("error parsing query: %w", err) - } - return p.getDMLIssues(query, parseTree) -} - -func (p *ParserIssueDetector) getDMLIssues(query string, parseTree *pg_query.ParseResult) ([]issue.IssueInstance, error) { - var result []issue.IssueInstance - var unsupportedConstructs []string - visited := make(map[protoreflect.Message]bool) - detectors := []UnsupportedConstructDetector{ - NewFuncCallDetector(), - NewColumnRefDetector(), - NewXmlExprDetector(), - } - - processor := func(msg protoreflect.Message) error { - for _, detector := range detectors { - log.Debugf("running detector %T", detector) - constructs, err := detector.Detect(msg) - if err != nil { - log.Debugf("error in detector %T: %v", detector, err) - return fmt.Errorf("error in detectors %T: %w", detector, err) - } - unsupportedConstructs = lo.Union(unsupportedConstructs, constructs) - } - return nil - } - 
- parseTreeProtoMsg := queryparser.GetProtoMessageFromParseTree(parseTree) - err := queryparser.TraverseParseTree(parseTreeProtoMsg, visited, processor) - if err != nil { - return result, fmt.Errorf("error traversing parse tree message: %w", err) - } - - for _, unsupportedConstruct := range unsupportedConstructs { - switch unsupportedConstruct { - case ADVISORY_LOCKS: - result = append(result, issue.NewAdvisoryLocksIssue(issue.DML_QUERY_OBJECT_TYPE, "", query)) - case SYSTEM_COLUMNS: - result = append(result, issue.NewSystemColumnsIssue(issue.DML_QUERY_OBJECT_TYPE, "", query)) - case XML_FUNCTIONS: - result = append(result, issue.NewXmlFunctionsIssue(issue.DML_QUERY_OBJECT_TYPE, "", query)) - } - } - return result, nil -} diff --git a/yb-voyager/src/queryissue/unsupported_dml_constructs.go b/yb-voyager/src/queryissue/unsupported_dml_constructs.go deleted file mode 100644 index d81206b4f4..0000000000 --- a/yb-voyager/src/queryissue/unsupported_dml_constructs.go +++ /dev/null @@ -1,97 +0,0 @@ -package queryissue - -import ( - log "github.com/sirupsen/logrus" - "github.com/yugabyte/yb-voyager/yb-voyager/src/queryparser" - "google.golang.org/protobuf/reflect/protoreflect" -) - -const ( - ADVISORY_LOCKS = "Advisory Locks" - SYSTEM_COLUMNS = "System Columns" - XML_FUNCTIONS = "XML Functions" -) - -// To Add a new unsupported query construct implement this interface -type UnsupportedConstructDetector interface { - Detect(msg protoreflect.Message) ([]string, error) -} - -type FuncCallDetector struct { - // right now it covers Advisory Locks and XML functions - unsupportedFuncs map[string]string -} - -func NewFuncCallDetector() *FuncCallDetector { - unsupportedFuncs := make(map[string]string) - for _, fname := range unsupportedAdvLockFuncs { - unsupportedFuncs[fname] = ADVISORY_LOCKS - } - for _, fname := range unsupportedXmlFunctions { - unsupportedFuncs[fname] = XML_FUNCTIONS - } - - return &FuncCallDetector{ - unsupportedFuncs: unsupportedFuncs, - } -} - -// Detect 
checks if a FuncCall node uses an unsupported function. -func (d *FuncCallDetector) Detect(msg protoreflect.Message) ([]string, error) { - if queryparser.GetMsgFullName(msg) != queryparser.PG_QUERY_FUNCCALL_NODE { - return nil, nil - } - - funcName := queryparser.GetFuncNameFromFuncCall(msg) - log.Debugf("fetched function name from %s node: %q", queryparser.PG_QUERY_FUNCCALL_NODE, funcName) - if constructType, isUnsupported := d.unsupportedFuncs[funcName]; isUnsupported { - log.Debugf("detected unsupported function %q in msg - %+v", funcName, msg) - return []string{constructType}, nil - } - return nil, nil -} - -type ColumnRefDetector struct { - unsupportedColumns map[string]string -} - -func NewColumnRefDetector() *ColumnRefDetector { - unsupportedColumns := make(map[string]string) - for _, colName := range unsupportedSysCols { - unsupportedColumns[colName] = SYSTEM_COLUMNS - } - - return &ColumnRefDetector{ - unsupportedColumns: unsupportedColumns, - } -} - -// Detect checks if a ColumnRef node uses an unsupported system column -func (d *ColumnRefDetector) Detect(msg protoreflect.Message) ([]string, error) { - if queryparser.GetMsgFullName(msg) != queryparser.PG_QUERY_COLUMNREF_NODE { - return nil, nil - } - - colName := queryparser.GetColNameFromColumnRef(msg) - log.Debugf("fetched column name from %s node: %q", queryparser.PG_QUERY_COLUMNREF_NODE, colName) - if constructType, isUnsupported := d.unsupportedColumns[colName]; isUnsupported { - log.Debugf("detected unsupported system column %q in msg - %+v", colName, msg) - return []string{constructType}, nil - } - return nil, nil -} - -type XmlExprDetector struct{} - -func NewXmlExprDetector() *XmlExprDetector { - return &XmlExprDetector{} -} - -// Detect checks if a XmlExpr node is present, means Xml type/functions are used -func (d *XmlExprDetector) Detect(msg protoreflect.Message) ([]string, error) { - if queryparser.GetMsgFullName(msg) == queryparser.PG_QUERY_XMLEXPR_NODE { - log.Debug("detected xml 
expression") - return []string{XML_FUNCTIONS}, nil - } - return nil, nil -} diff --git a/yb-voyager/src/queryissue/unsupported_dml_constructs_test.go b/yb-voyager/src/queryissue/unsupported_dml_constructs_test.go deleted file mode 100644 index 88fba29493..0000000000 --- a/yb-voyager/src/queryissue/unsupported_dml_constructs_test.go +++ /dev/null @@ -1,375 +0,0 @@ -package queryissue - -import ( - "fmt" - "sort" - "testing" - - pg_query "github.com/pganalyze/pg_query_go/v5" - "github.com/samber/lo" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "github.com/yugabyte/yb-voyager/yb-voyager/src/queryparser" - "google.golang.org/protobuf/reflect/protoreflect" -) - -// TestFuncCallDetector tests the Advisory Lock Detector. -func TestFuncCallDetector(t *testing.T) { - advisoryLockSqls := []string{ - `SELECT pg_advisory_lock(100), COUNT(*) FROM cars;`, - `SELECT pg_advisory_lock_shared(100), COUNT(*) FROM cars;`, - `SELECT pg_advisory_unlock_shared(100);`, - `SELECT * FROM (SELECT pg_advisory_xact_lock(200)) AS lock_acquired;`, - `SELECT * FROM (SELECT pg_advisory_xact_lock_shared(200)) AS lock_acquired;`, - `SELECT id, first_name FROM employees WHERE pg_try_advisory_lock(300) IS TRUE;`, - `SELECT id, first_name FROM employees WHERE salary > 400 AND EXISTS (SELECT 1 FROM pg_advisory_lock(500));`, - `SELECT id, first_name FROM employees WHERE pg_try_advisory_lock(600) IS TRUE AND salary > 700;`, - `SELECT pg_try_advisory_lock_shared(1234, 100);`, - `SELECT pg_try_advisory_xact_lock_shared(1,2);`, - `WITH lock_cte AS ( - SELECT pg_advisory_lock(1000) AS lock_acquired - ) - SELECT e.id, e.name - FROM employees e - JOIN lock_cte ON TRUE - WHERE e.department = 'Engineering';`, - `SELECT e.id, e.name - FROM employees e - WHERE EXISTS ( - SELECT 1 - FROM projects p - WHERE p.manager_id = e.id - AND pg_try_advisory_lock_shared(p.project_id) - );`, - `SELECT e.id, - CASE - WHEN e.salary > 100000 THEN pg_advisory_lock(e.id) - ELSE pg_advisory_unlock(e.id) 
- END AS lock_status - FROM employees e;`, - `SELECT e.id, l.lock_status - FROM employees e - JOIN LATERAL ( - SELECT pg_try_advisory_lock(e.id) AS lock_status - ) l ON TRUE - WHERE e.status = 'active';`, - `WITH lock_cte AS ( - SELECT 1 - ) - SELECT e.id, e.name, pg_try_advisory_lock(600) - FROM employees e - JOIN lock_cte ON TRUE - WHERE pg_advisory_unlock(500) = TRUE;`, - `SELECT pg_advisory_unlock_all();`, - } - - detector := NewFuncCallDetector() - for _, sql := range advisoryLockSqls { - parseResult, err := pg_query.Parse(sql) - assert.NoError(t, err, "Failed to parse SQL: %s", sql) - - visited := make(map[protoreflect.Message]bool) - unsupportedConstructs := []string{} - - processor := func(msg protoreflect.Message) error { - constructs, err := detector.Detect(msg) - if err != nil { - return err - } - unsupportedConstructs = append(unsupportedConstructs, constructs...) - return nil - } - - parseTreeMsg := parseResult.Stmts[0].Stmt.ProtoReflect() - err = queryparser.TraverseParseTree(parseTreeMsg, visited, processor) - assert.NoError(t, err) - // The detector should detect Advisory Locks in these queries - assert.Contains(t, unsupportedConstructs, ADVISORY_LOCKS, "Advisory Locks not detected in SQL: %s", sql) - } -} - -// TestColumnRefDetector tests the System Column Detector. 
-func TestColumnRefDetector(t *testing.T) { - systemColumnSqls := []string{ - `SELECT xmin, xmax FROM employees;`, - `SELECT * FROM (SELECT * FROM employees WHERE xmin = 100) AS version_info;`, - `SELECT * FROM (SELECT xmin, xmax FROM employees) AS version_info;`, - `SELECT * FROM employees WHERE xmin = 200;`, - `SELECT * FROM employees WHERE 1 = 1 AND xmax = 300;`, - `SELECT cmin - FROM employees;`, - `SELECT cmax - FROM employees;`, - `SELECT ctid, tableoid, xmin, xmax, cmin, cmax - FROM employees;`, - `WITH versioned_employees AS ( - SELECT *, xmin, xmax - FROM employees - ) - SELECT ve1.id, ve2.id - FROM versioned_employees ve1 - JOIN versioned_employees ve2 ON ve1.xmin = ve2.xmax - WHERE ve1.id <> ve2.id;`, - `SELECT e.id, e.name, - ROW_NUMBER() OVER (ORDER BY e.ctid) AS row_num - FROM employees e;`, - `SELECT * - FROM employees e - WHERE e.xmax = ( - SELECT MAX(xmax) - FROM employees - WHERE department = e.department - );`, - `UPDATE employees - SET salary = salary * 1.05 - WHERE department = 'Sales' - RETURNING id, xmax;`, - `SELECT xmin, COUNT(*) - FROM employees - GROUP BY xmin - HAVING COUNT(*) > 1;`, - } - - detector := NewColumnRefDetector() - - for _, sql := range systemColumnSqls { - parseResult, err := pg_query.Parse(sql) - assert.NoError(t, err, "Failed to parse SQL: %s", sql) - - visited := make(map[protoreflect.Message]bool) - unsupportedConstructs := []string{} - - processor := func(msg protoreflect.Message) error { - constructs, err := detector.Detect(msg) - if err != nil { - return err - } - unsupportedConstructs = append(unsupportedConstructs, constructs...) 
- return nil - } - - parseTreeMsg := parseResult.Stmts[0].Stmt.ProtoReflect() - err = queryparser.TraverseParseTree(parseTreeMsg, visited, processor) - assert.NoError(t, err) - // The detector should detect System Columns in these queries - assert.Contains(t, unsupportedConstructs, SYSTEM_COLUMNS, "System Columns not detected in SQL: %s", sql) - } -} - -// TestXmlExprDetector tests the XML Function Detection. -func TestXmlExprDetectorAndFuncCallDetector(t *testing.T) { - xmlFunctionSqls := []string{ - `SELECT id, xmlelement(name "employee", name) AS employee_data FROM employees;`, - `SELECT id, xpath('/person/name/text()', data) AS name FROM xml_example;`, - `SELECT id FROM employees WHERE xmlexists('/id' PASSING BY VALUE xmlcolumn);`, - `SELECT e.id, x.employee_xml - FROM employees e - JOIN ( - SELECT xmlelement(name "employee", xmlattributes(e.id AS "id"), e.name) AS employee_xml - FROM employees e - ) x ON x.employee_xml IS NOT NULL - WHERE xmlexists('//employee[name="John Doe"]' PASSING BY REF x.employee_xml);`, - `WITH xml_data AS ( - SELECT - id, - xml_column, - xpath('/root/element/@attribute', xml_column) as xpath_result - FROM xml_documents - ) - SELECT - x.id, - (xt.value).text as value - FROM - xml_data x - CROSS JOIN LATERAL unnest(x.xpath_result) as xt(value);`, - `SELECT e.id, e.name - FROM employees e - WHERE CASE - WHEN e.department = 'IT' THEN xmlexists('//access[@level="high"]' PASSING e.permissions) - ELSE FALSE - END;`, - `SELECT xmlserialize( - content xmlelement(name "employees", - xmlagg( - xmlelement(name "employee", - xmlattributes(e.id AS "id"), - e.name - ) - ) - ) AS CLOB - ) AS employees_xml - FROM employees e - WHERE e.status = 'active';`, - `CREATE VIEW employee_xml_view AS - SELECT e.id, - xmlelement(name "employee", - xmlattributes(e.id AS "id"), - e.name, - e.department - ) AS employee_xml - FROM employees e;`, - `SELECT xmltext('Widget') AS inventory_text -FROM inventory -WHERE id = 5;`, - `SELECT xmlforest(name, department) AS 
employee_info -FROM employees -WHERE id = 4;`, - // TODO: future - - // `SELECT xmltable.* - // FROM xmldata, - // XMLTABLE('//ROWS/ROW' - // PASSING data - // COLUMNS id int PATH '@id', - // ordinality FOR ORDINALITY, - // "COUNTRY_NAME" text, - // country_id text PATH 'COUNTRY_ID', - // size_sq_km float PATH 'SIZE[@unit = "sq_km"]', - // size_other text PATH - // 'concat(SIZE[@unit!="sq_km"], " ", SIZE[@unit!="sq_km"]/@unit)', - // premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');`, - // `SELECT xmltable.* - // FROM XMLTABLE(XMLNAMESPACES('http://example.com/myns' AS x, - // 'http://example.com/b' AS "B"), - // '/x:example/x:item' - // PASSING (SELECT data FROM xmldata) - // COLUMNS foo int PATH '@foo', - // bar int PATH '@B:bar');`, - `SELECT xml_is_well_formed_content('Alpha') AS is_well_formed_content -FROM projects -WHERE project_id = 10;`, - `SELECT xml_is_well_formed_document(xmlforest(name, department)) AS is_well_formed_document -FROM employees -WHERE id = 2;`, - `SELECT xml_is_well_formed(xmltext('Jane Doe')) AS is_well_formed -FROM employees -WHERE id = 1;`, - `SELECT xmlparse(DOCUMENT 'John');`, - `SELECT xpath_exists('/employee/name', 'John'::xml)`, - `SELECT table_to_xml('employees', TRUE, FALSE, '');`, - `SELECT query_to_xml('SELECT * FROM employees', TRUE, FALSE, '');`, - `SELECT schema_to_xml('public', TRUE, FALSE, '');`, - `SELECT database_to_xml(TRUE, TRUE, '');`, - `SELECT query_to_xmlschema('SELECT * FROM employees', TRUE, FALSE, '');`, - `SELECT table_to_xmlschema('employees', TRUE, FALSE, '');`, - `SELECT xmlconcat('value1'::xml, 'value2'::xml);`, - `SELECT xmlcomment('Sample XML comment');`, - `SELECT xmlpi(name php, 'echo "hello world";');`, - `SELECT xmlroot('content', VERSION '1.0');`, - `SELECT xmlagg('content');`, - `SELECT xmlexists('//some/path' PASSING BY REF '');`, - `SELECT table_to_xml_and_xmlschema('public', 'employees', true, false, '');`, - `SELECT * FROM cursor_to_xmlschema('foo_cursor', false, true,'');`, - 
`SELECT * FROM cursor_to_xml('foo_cursor', 1, false, false,'');`, - `SELECT query_to_xml_and_xmlschema('SELECT * FROM employees', true, false, '');`, - `SELECT schema_to_xmlschema('public', true, false, '');`, - `SELECT schema_to_xml_and_xmlschema('public', true, false, '');`, - `SELECT database_to_xmlschema(true, false, '');`, - `SELECT database_to_xml_and_xmlschema(true, false, '');`, - `SELECT xmlconcat2('Content', 'More Content');`, - `SELECT xmlvalidate('content');`, - `SELECT xml_in('input');`, - `SELECT xml_out('output');`, - `SELECT xml_recv('');`, - `SELECT xml_send('send');`, - } - - detectors := []UnsupportedConstructDetector{ - NewXmlExprDetector(), - NewFuncCallDetector(), - } - - for _, sql := range xmlFunctionSqls { - parseResult, err := pg_query.Parse(sql) - assert.NoError(t, err) - - visited := make(map[protoreflect.Message]bool) - unsupportedConstructs := []string{} - - processor := func(msg protoreflect.Message) error { - for _, detector := range detectors { - log.Debugf("running detector %T", detector) - constructs, err := detector.Detect(msg) - if err != nil { - log.Debugf("error in detector %T: %v", detector, err) - return fmt.Errorf("error in detectors %T: %w", detector, err) - } - unsupportedConstructs = lo.Union(unsupportedConstructs, constructs) - } - return nil - } - - parseTreeMsg := parseResult.Stmts[0].Stmt.ProtoReflect() - err = queryparser.TraverseParseTree(parseTreeMsg, visited, processor) - assert.NoError(t, err) - // The detector should detect XML Functions in these queries - assert.Contains(t, unsupportedConstructs, XML_FUNCTIONS, "XML Functions not detected in SQL: %s", sql) - } -} - -// Combination of: FuncCallDetector, ColumnRefDetector, XmlExprDetector -func TestCombinationOfDetectors1(t *testing.T) { - combinationSqls := []string{ - `WITH LockedEmployees AS ( - SELECT *, pg_advisory_lock(xmin) AS lock_acquired - FROM employees - WHERE pg_try_advisory_lock(xmin) IS TRUE -) -SELECT xmlelement(name "EmployeeData", xmlagg( - 
xmlelement(name "Employee", xmlattributes(id AS "ID"), - xmlforest(name AS "Name", xmin AS "TransactionID", xmax AS "ModifiedID")))) -FROM LockedEmployees -WHERE xmax IS NOT NULL;`, - `WITH Data AS ( - SELECT id, name, xmin, xmax, - pg_try_advisory_lock(id) AS lock_status, - xmlelement(name "info", xmlforest(name as "name", xmin as "transaction_start", xmax as "transaction_end")) as xml_info - FROM projects - WHERE xmin > 100 AND xmax < 500 -) -SELECT x.id, x.xml_info -FROM Data x -WHERE x.lock_status IS TRUE;`, - `UPDATE employees -SET salary = salary * 1.1 -WHERE pg_try_advisory_xact_lock(ctid) IS TRUE AND department = 'Engineering' -RETURNING id, - xmlelement(name "UpdatedEmployee", - xmlattributes(id AS "ID"), - xmlforest(name AS "Name", salary AS "NewSalary", xmin AS "TransactionStartID", xmax AS "TransactionEndID"));`, - } - expectedConstructs := []string{ADVISORY_LOCKS, SYSTEM_COLUMNS, XML_FUNCTIONS} - - detectors := []UnsupportedConstructDetector{ - NewFuncCallDetector(), - NewColumnRefDetector(), - NewXmlExprDetector(), - } - for _, sql := range combinationSqls { - parseResult, err := pg_query.Parse(sql) - assert.NoError(t, err) - - visited := make(map[protoreflect.Message]bool) - unsupportedConstructs := []string{} - - processor := func(msg protoreflect.Message) error { - for _, detector := range detectors { - log.Debugf("running detector %T", detector) - constructs, err := detector.Detect(msg) - if err != nil { - log.Debugf("error in detector %T: %v", detector, err) - return fmt.Errorf("error in detectors %T: %w", detector, err) - } - unsupportedConstructs = lo.Union(unsupportedConstructs, constructs) - } - return nil - } - - parseTreeMsg := parseResult.Stmts[0].Stmt.ProtoReflect() - err = queryparser.TraverseParseTree(parseTreeMsg, visited, processor) - assert.NoError(t, err) - - sort.Strings(unsupportedConstructs) - sort.Strings(expectedConstructs) - assert.Equal(t, expectedConstructs, unsupportedConstructs, "Detected constructs do not exactly match 
the expected constructs") - } -} diff --git a/yb-voyager/src/queryparser/helpers.go b/yb-voyager/src/queryparser/helpers.go deleted file mode 100644 index 9eca161b04..0000000000 --- a/yb-voyager/src/queryparser/helpers.go +++ /dev/null @@ -1,95 +0,0 @@ -package queryparser - -import "google.golang.org/protobuf/reflect/protoreflect" - -const ( - DOCS_LINK_PREFIX = "https://docs.yugabyte.com/preview/yugabyte-voyager/known-issues/" - POSTGRESQL_PREFIX = "postgresql/" - ADVISORY_LOCKS_DOC_LINK = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#advisory-locks-is-not-yet-implemented" - SYSTEM_COLUMNS_DOC_LINK = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#system-columns-is-not-yet-supported" - XML_FUNCTIONS_DOC_LINK = DOCS_LINK_PREFIX + POSTGRESQL_PREFIX + "#xml-functions-is-not-yet-supported" -) - -// Sample example: {func_call:{funcname:{string:{sval:"pg_advisory_lock"}} -func GetFuncNameFromFuncCall(funcCallNode protoreflect.Message) string { - if GetMsgFullName(funcCallNode) != PG_QUERY_FUNCCALL_NODE { - return "" - } - - funcnameField := funcCallNode.Get(funcCallNode.Descriptor().Fields().ByName("funcname")) - funcnameList := funcnameField.List() - var names []string - - // TODO: simplification to directly access last item of funcnameList - for i := 0; i < funcnameList.Len(); i++ { - item := funcnameList.Get(i) - name := GetStringValueFromNode(item.Message()) - if name != "" { - names = append(names, name) - } - } - if len(names) == 0 { - return "" - } - return names[len(names)-1] // ignoring schema_name -} - -// Sample example:: {column_ref:{fields:{string:{sval:"xmax"}} -func GetColNameFromColumnRef(columnRefNode protoreflect.Message) string { - if GetMsgFullName(columnRefNode) != PG_QUERY_COLUMNREF_NODE { - return "" - } - - fields := columnRefNode.Get(columnRefNode.Descriptor().Fields().ByName("fields")) - fieldsList := fields.List() - var names []string - - // TODO: simplification to directly access last item of fieldsList - for i := 0; i < fieldsList.Len(); i++ { - item 
:= fieldsList.Get(i) - name := GetStringValueFromNode(item.Message()) - if name != "" { - names = append(names, name) - } - } - if len(names) == 0 { - return "" - } - return names[len(names)-1] // ignoring schema_name -} - -// Sample example:: {column_ref:{fields:{string:{sval:"s"}} fields:{string:{sval:"tableoid"}} location:7} -func GetStringValueFromNode(nodeMsg protoreflect.Message) string { - if nodeMsg == nil || !nodeMsg.IsValid() { - return "" - } - - // 'nodeMsg' is a 'pg_query.Node' getting the set field in the 'node' oneof - nodeField := nodeMsg.WhichOneof(nodeMsg.Descriptor().Oneofs().ByName("node")) - if nodeField == nil { - return "" - } - - nodeValue := nodeMsg.Get(nodeField) - node := nodeValue.Message() - if node == nil || !node.IsValid() { - return "" - } - - nodeType := node.Descriptor().FullName() - switch nodeType { - case PG_QUERY_STRING_NODE: - strField := node.Descriptor().Fields().ByName("sval") - strValue := node.Get(strField) - return strValue.String() - // example: SELECT * FROM employees; - case PG_QUERY_ASTAR_NODE: - return "" - default: - return "" - } -} - -func GetMsgFullName(msg protoreflect.Message) string { - return string(msg.Descriptor().FullName()) -} diff --git a/yb-voyager/src/queryparser/query_parser.go b/yb-voyager/src/queryparser/query_parser.go deleted file mode 100644 index f90e6faabc..0000000000 --- a/yb-voyager/src/queryparser/query_parser.go +++ /dev/null @@ -1,20 +0,0 @@ -package queryparser - -import ( - pg_query "github.com/pganalyze/pg_query_go/v5" - log "github.com/sirupsen/logrus" - "google.golang.org/protobuf/reflect/protoreflect" -) - -func Parse(query string) (*pg_query.ParseResult, error) { - log.Debugf("parsing the query [%s]", query) - tree, err := pg_query.Parse(query) - if err != nil { - return nil, err - } - return tree, nil -} - -func GetProtoMessageFromParseTree(parseTree *pg_query.ParseResult) protoreflect.Message { - return parseTree.Stmts[0].Stmt.ProtoReflect() -} diff --git 
a/yb-voyager/src/srcdb/common.go b/yb-voyager/src/srcdb/common.go index 0d20ead96a..2035d537a7 100644 --- a/yb-voyager/src/srcdb/common.go +++ b/yb-voyager/src/srcdb/common.go @@ -38,7 +38,7 @@ func getExportedDataFileList(tablesMetadata map[string]*utils.TableProgressMetad targetTableName := strings.TrimSuffix(filepath.Base(tableMetadata.FinalFilePath), "_data.sql") table, err := namereg.NameReg.LookupTableName(targetTableName) if err != nil { - utils.ErrExit("error while looking up table name %q: %v", targetTableName, err) + utils.ErrExit("error while looking up table name: %q: %v", targetTableName, err) } if !utils.FileOrFolderExists(tableMetadata.FinalFilePath) { // This can happen in case of nested tables in Oracle. diff --git a/yb-voyager/src/srcdb/data/gather-assessment-metadata.tar.gz b/yb-voyager/src/srcdb/data/gather-assessment-metadata.tar.gz index 166bfb4887..1d00bd0b12 100644 Binary files a/yb-voyager/src/srcdb/data/gather-assessment-metadata.tar.gz and b/yb-voyager/src/srcdb/data/gather-assessment-metadata.tar.gz differ diff --git a/yb-voyager/src/srcdb/data/gather-assessment-metadata/oracle/yb-voyager-oracle-gather-assessment-metadata.sh b/yb-voyager/src/srcdb/data/gather-assessment-metadata/oracle/yb-voyager-oracle-gather-assessment-metadata.sh index 7550d8f765..5f30e93410 100755 --- a/yb-voyager/src/srcdb/data/gather-assessment-metadata/oracle/yb-voyager-oracle-gather-assessment-metadata.sh +++ b/yb-voyager/src/srcdb/data/gather-assessment-metadata/oracle/yb-voyager-oracle-gather-assessment-metadata.sh @@ -269,7 +269,7 @@ main() { run_command "$ora2pg_cmd" done - ora2pg_report_cmd="ora2pg -t show_report --estimate_cost -c $OUTPUT_FILE_PATH --dump_as_sheet --quiet > $assessment_metadata_dir/schema/ora2pg_report.csv" + ora2pg_report_cmd="ora2pg -t show_report --estimate_cost -c $OUTPUT_FILE_PATH --dump_as_sheet --quiet > $assessment_metadata_dir/schema/ora2pg_report.csv" log "INFO" "executing ora2pg command for report: $ora2pg_report_cmd" run_command 
"$ora2pg_report_cmd" diff --git a/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/db-queries-summary.psql b/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/db-queries-summary.psql index f63695ea91..02cab1e132 100644 --- a/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/db-queries-summary.psql +++ b/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/db-queries-summary.psql @@ -3,7 +3,7 @@ SELECT queryid, query FROM - pg_stat_statements + :schema_name.pg_stat_statements WHERE dbid = (SELECT oid FROM pg_database WHERE datname = current_database()); diff --git a/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/table-columns-data-types.psql b/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/table-columns-data-types.psql index b448e66297..6e8978342d 100644 --- a/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/table-columns-data-types.psql +++ b/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/table-columns-data-types.psql @@ -7,6 +7,7 @@ SELECT CASE WHEN c.data_type = 'ARRAY' THEN c.udt_schema || '.' || SUBSTRING(c.udt_name FROM 2) || '[]' -- Removes leading _ for array types and appends [] WHEN c.data_type = 'USER-DEFINED' THEN c.udt_schema || '.' 
|| c.udt_name -- in case of User defined types using udt_name + WHEN c.domain_name <> '' THEN c.domain_name -- in case of datatype is a domain name expected type is domain_name e.g "lo" is a domain name over oid https://www.postgresql.org/docs/current/lo.html#LO-RATIONALE:~:text=The%20module%20also%20provides%20a%20data%20type%20lo%2C%20which%20is%20really%20just%20a%20domain%20over%20the%20oid%20type ELSE c.data_type -- in native type cases using data_type END AS data_type FROM diff --git a/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/yb-voyager-pg-gather-assessment-metadata.sh b/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/yb-voyager-pg-gather-assessment-metadata.sh index 00a6cff831..01e1b6b55e 100755 --- a/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/yb-voyager-pg-gather-assessment-metadata.sh +++ b/yb-voyager/src/srcdb/data/gather-assessment-metadata/postgresql/yb-voyager-pg-gather-assessment-metadata.sh @@ -35,12 +35,13 @@ Arguments: assessment_metadata_dir The directory path where the assessment metadata will be stored. This script will attempt to create the directory if it does not exist. - iops_capture_interval This argument is used to configure the interval for measuring the IOPS - metadata on source (in seconds). (Default 120) - + pgss_enabled Determine whether the pg_stat_statements extension is correctly installed, + configured, and enabled on the source database. + + iops_capture_interval Configure the interval for measuring the IOPS metadata on source (in seconds). (Default 120) Example: - PGPASSWORD= $SCRIPT_NAME 'postgresql://user@localhost:5432/mydatabase' 'public|sales' '/path/to/assessment/metadata' '60' + PGPASSWORD= $SCRIPT_NAME 'postgresql://user@localhost:5432/mydatabase' 'public|sales' '/path/to/assessment/metadata' 'true' '60' Please ensure to replace the placeholders with actual values suited to your environment. 
" @@ -52,22 +53,14 @@ if [ "$1" == "--help" ]; then fi # Check if all required arguments are provided -if [ "$#" -lt 3 ]; then - echo "Usage: $0 [iops_capture_interval]" +if [ "$#" -lt 4 ]; then + echo "Usage: $0 [iops_capture_interval]" exit 1 -elif [ "$#" -gt 4 ]; then - echo "Usage: $0 [iops_capture_interval]" +elif [ "$#" -gt 5 ]; then + echo "Usage: $0 [iops_capture_interval]" exit 1 fi -# Set default iops interval -iops_capture_interval=120 -# Override default sleep interval if a fourth argument is provided -if [ "$#" -eq 4 ]; then - iops_capture_interval=$4 - echo "sleep interval for calculating iops: $iops_capture_interval seconds" -fi - pg_connection_string=$1 schema_list=$2 assessment_metadata_dir=$3 @@ -77,6 +70,16 @@ if [ ! -d "$assessment_metadata_dir" ]; then exit 1 fi +pgss_enabled=$4 +iops_capture_interval=120 # default sleep for calculating iops +# Override default sleep interval if a fifth argument is provided +if [ "$#" -eq 5 ]; then + iops_capture_interval=$5 + echo "sleep interval for calculating iops: $iops_capture_interval seconds" +fi + + + LOG_FILE=$assessment_metadata_dir/yb-voyager-assessment.log log() { local level="$1" @@ -118,6 +121,29 @@ run_command() { fi } + +# Function to convert schema list to an array and ensure 'public' is included +prepare_schema_array() { + local schema_list=$1 + local -a schema_array + + # Convert the schema list (pipe-separated) to an array + IFS='|' read -r -a schema_array <<< "$schema_list" + local public_found=false + for schema in "${schema_array[@]}"; do + if [[ "$schema" == "public" ]]; then + public_found=true + break + fi + done + + if [[ $public_found == false ]]; then + schema_array+=("public") + fi + + echo "${schema_array[*]}" +} + main() { # Resolve the absolute path of assessment_metadata_dir assessment_metadata_dir=$(cd "$assessment_metadata_dir" && pwd) @@ -147,45 +173,61 @@ main() { fi # checking before quoting connection_string - pg_stat_available=$(psql -A -t -q $pg_connection_string -c 
"SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements'") + pgss_ext_schema=$(psql -A -t -q $pg_connection_string -c "SELECT nspname FROM pg_extension e, pg_namespace n WHERE e.extnamespace = n.oid AND e.extname = 'pg_stat_statements'") + log "INFO" "pg_stat_statements extension is available in schema: $pgss_ext_schema" + + schema_array=$(prepare_schema_array $schema_list) + log "INFO" "schema_array for checking pgss_ext_schema: $schema_array" # quote the required shell variables pg_connection_string=$(quote_string "$pg_connection_string") schema_list=$(quote_string "$schema_list") - print_and_log "INFO" "Assessment metadata collection started for '$schema_list' schemas" + print_and_log "INFO" "Assessment metadata collection started for $schema_list schema(s)" for script in $SCRIPT_DIR/*.psql; do script_name=$(basename "$script" .psql) script_action=$(basename "$script" .psql | sed 's/-/ /g') - if [[ "$script_name" == "db-queries-summary" ]]; then - if [[ "$REPORT_UNSUPPORTED_QUERY_CONSTRUCTS" == "false" ]]; then - continue - fi - if [[ "$pg_stat_available" != "1" ]]; then - print_and_log "INFO" "Skipping $script_action: pg_stat_statements is unavailable." - continue - fi - fi + print_and_log "INFO" "Collecting $script_action..." 
- if [ $script_name == "table-index-iops" ]; then - psql_command="psql -q $pg_connection_string -f $script -v schema_list=$schema_list -v ON_ERROR_STOP=on -v measurement_type=initial" - log "INFO" "Executing initial IOPS collection: $psql_command" - run_command "$psql_command" - mv table-index-iops.csv table-index-iops-initial.csv - - log "INFO" "Sleeping for $iops_capture_interval seconds to capture IOPS data" - # sleeping to calculate the iops reading two different time intervals, to calculate reads_per_second and writes_per_second - sleep $iops_capture_interval - - psql_command="psql -q $pg_connection_string -f $script -v schema_list=$schema_list -v ON_ERROR_STOP=on -v measurement_type=final" - log "INFO" "Executing final IOPS collection: $psql_command" - run_command "$psql_command" - mv table-index-iops.csv table-index-iops-final.csv - else - psql_command="psql -q $pg_connection_string -f $script -v schema_list=$schema_list -v ON_ERROR_STOP=on" - log "INFO" "Executing script: $psql_command" - run_command "$psql_command" - fi + + case $script_name in + "table-index-iops") + psql_command="psql -q $pg_connection_string -f $script -v schema_list=$schema_list -v ON_ERROR_STOP=on -v measurement_type=initial" + log "INFO" "Executing initial IOPS collection: $psql_command" + run_command "$psql_command" + mv table-index-iops.csv table-index-iops-initial.csv + + log "INFO" "Sleeping for $iops_capture_interval seconds to capture IOPS data" + # sleeping to calculate the iops reading two different time intervals, to calculate reads_per_second and writes_per_second + sleep $iops_capture_interval + + psql_command="psql -q $pg_connection_string -f $script -v schema_list=$schema_list -v ON_ERROR_STOP=on -v measurement_type=final" + log "INFO" "Executing final IOPS collection: $psql_command" + run_command "$psql_command" + mv table-index-iops.csv table-index-iops-final.csv + ;; + "db-queries-summary") + if [[ "$REPORT_UNSUPPORTED_QUERY_CONSTRUCTS" == "false" ]]; then + 
print_and_log "INFO" "Skipping $script_action: Reporting of unsupported query constructs is disabled." + continue + fi + + log "INFO" "argument pgss_enabled=$pgss_enabled" + if [[ "$pgss_enabled" == "false" ]]; then + print_and_log "WARN" "Skipping $script_action: argument pgss_enabled is set as false" + continue + fi + + psql_command="psql -q $pg_connection_string -f $script -v schema_name=$pgss_ext_schema -v ON_ERROR_STOP=on" + log "INFO" "Executing script: $psql_command" + run_command "$psql_command" + ;; + *) + psql_command="psql -q $pg_connection_string -f $script -v schema_list=$schema_list -v ON_ERROR_STOP=on" + log "INFO" "Executing script: $psql_command" + run_command "$psql_command" + ;; + esac done # check for pg_dump version diff --git a/yb-voyager/src/srcdb/main_test.go b/yb-voyager/src/srcdb/main_test.go new file mode 100644 index 0000000000..770855ad60 --- /dev/null +++ b/yb-voyager/src/srcdb/main_test.go @@ -0,0 +1,176 @@ +//go:build integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package srcdb + +import ( + "context" + "os" + "testing" + + _ "github.com/godror/godror" + _ "github.com/jackc/pgx/v5/stdlib" + log "github.com/sirupsen/logrus" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" + testcontainers "github.com/yugabyte/yb-voyager/yb-voyager/test/containers" +) + +type TestDB struct { + testcontainers.TestContainer + *Source +} + +var ( + testPostgresSource *TestDB + testOracleSource *TestDB + testMySQLSource *TestDB + testYugabyteDBSource *TestDB +) + +func TestMain(m *testing.M) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + postgresContainer := testcontainers.NewTestContainer("postgresql", nil) + err := postgresContainer.Start(ctx) + if err != nil { + utils.ErrExit("Failed to start postgres container: %v", err) + } + host, port, err := postgresContainer.GetHostPort() + if err != nil { + utils.ErrExit("%v", err) + } + testPostgresSource = &TestDB{ + TestContainer: postgresContainer, + Source: &Source{ + DBType: "postgresql", + DBVersion: postgresContainer.GetConfig().DBVersion, + User: postgresContainer.GetConfig().User, + Password: postgresContainer.GetConfig().Password, + Schema: postgresContainer.GetConfig().Schema, + DBName: postgresContainer.GetConfig().DBName, + Host: host, + Port: port, + SSLMode: "disable", + }, + } + err = testPostgresSource.DB().Connect() + if err != nil { + utils.ErrExit("Failed to connect to postgres database: %w", err) + } + defer testPostgresSource.DB().Disconnect() + + oracleContainer := testcontainers.NewTestContainer("oracle", nil) + err = oracleContainer.Start(ctx) + if err != nil { + utils.ErrExit("Failed to start oracle container: %v", err) + } + host, port, err = oracleContainer.GetHostPort() + if err != nil { + utils.ErrExit("%v", err) + } + + testOracleSource = &TestDB{ + TestContainer: oracleContainer, + Source: &Source{ + DBType: "oracle", + DBVersion: oracleContainer.GetConfig().DBVersion, + User: oracleContainer.GetConfig().User, + Password: 
oracleContainer.GetConfig().Password, + Schema: oracleContainer.GetConfig().Schema, + DBName: oracleContainer.GetConfig().DBName, + Host: host, + Port: port, + }, + } + + err = testOracleSource.DB().Connect() + if err != nil { + utils.ErrExit("Failed to connect to oracle database: %w", err) + } + defer testOracleSource.DB().Disconnect() + + mysqlContainer := testcontainers.NewTestContainer("mysql", nil) + err = mysqlContainer.Start(ctx) + if err != nil { + utils.ErrExit("Failed to start mysql container: %v", err) + } + host, port, err = mysqlContainer.GetHostPort() + if err != nil { + utils.ErrExit("%v", err) + } + testMySQLSource = &TestDB{ + TestContainer: mysqlContainer, + Source: &Source{ + DBType: "mysql", + DBVersion: mysqlContainer.GetConfig().DBVersion, + User: mysqlContainer.GetConfig().User, + Password: mysqlContainer.GetConfig().Password, + Schema: mysqlContainer.GetConfig().Schema, + DBName: mysqlContainer.GetConfig().DBName, + Host: host, + Port: port, + SSLMode: "disable", + }, + } + + err = testMySQLSource.DB().Connect() + if err != nil { + utils.ErrExit("Failed to connect to mysql database: %w", err) + } + defer testMySQLSource.DB().Disconnect() + + yugabytedbContainer := testcontainers.NewTestContainer("yugabytedb", nil) + err = yugabytedbContainer.Start(ctx) + if err != nil { + utils.ErrExit("Failed to start yugabytedb container: %v", err) + } + host, port, err = yugabytedbContainer.GetHostPort() + if err != nil { + utils.ErrExit("%v", err) + } + testYugabyteDBSource = &TestDB{ + TestContainer: yugabytedbContainer, + Source: &Source{ + DBType: "yugabytedb", + DBVersion: yugabytedbContainer.GetConfig().DBVersion, + User: yugabytedbContainer.GetConfig().User, + Password: yugabytedbContainer.GetConfig().Password, + Schema: yugabytedbContainer.GetConfig().Schema, + DBName: yugabytedbContainer.GetConfig().DBName, + Host: host, + Port: port, + SSLMode: "disable", + }, + } + + err = testYugabyteDBSource.DB().Connect() + if err != nil { + 
 utils.ErrExit("Failed to connect to yugabytedb database: %w", err) + } + defer testYugabyteDBSource.DB().Disconnect() + + // to avoid info level logs flooding the test output + log.SetLevel(log.WarnLevel) + + exitCode := m.Run() + + // cleaning up all the running containers + testcontainers.TerminateAllContainers() + + os.Exit(exitCode) +} diff --git a/yb-voyager/src/srcdb/mysql.go b/yb-voyager/src/srcdb/mysql.go index 216821045e..3090682d18 100644 --- a/yb-voyager/src/srcdb/mysql.go +++ b/yb-voyager/src/srcdb/mysql.go @@ -47,7 +47,7 @@ func newMySQL(s *Source) *MySQL { func (ms *MySQL) Connect() error { db, err := sql.Open("mysql", ms.getConnectionUri()) - db.SetMaxOpenConns(1) + db.SetMaxOpenConns(ms.source.NumConnections) db.SetConnMaxIdleTime(5 * time.Minute) ms.db = db return err @@ -71,21 +71,17 @@ func (ms *MySQL) CheckSchemaExists() bool { return true } -func (ms *MySQL) CheckRequiredToolsAreInstalled() { - checkTools("ora2pg") -} - -func (ms *MySQL) GetTableRowCount(tableName sqlname.NameTuple) int64 { +func (ms *MySQL) GetTableRowCount(tableName sqlname.NameTuple) (int64, error) { var rowCount int64 query := fmt.Sprintf("select count(*) from %s", tableName.AsQualifiedCatalogName()) log.Infof("Querying row count of table %s", tableName) err := ms.db.QueryRow(query).Scan(&rowCount) if err != nil { - utils.ErrExit("Failed to query %q for row count of %q: %s", query, tableName, err) + return 0, fmt.Errorf("query %q for row count of %q: %w", query, tableName, err) } log.Infof("Table %q has %v rows.", tableName, rowCount) - return rowCount + return rowCount, nil } func (ms *MySQL) GetTableApproxRowCount(tableName sqlname.NameTuple) int64 { @@ -98,7 +94,7 @@ func (ms *MySQL) GetTableApproxRowCount(tableName sqlname.NameTuple) int64 { log.Infof("Querying '%s' approx row count of table %q", query, tableName.String()) err := ms.db.QueryRow(query).Scan(&approxRowCount) if err != nil { - utils.ErrExit("Failed to query %q for approx row count of %q: %s", query, 
tableName.String(), err) + utils.ErrExit("Failed to query for approx row count of table: %q: %q %s", tableName.String(), query, err) } log.Infof("Table %q has approx %v rows.", tableName.String(), approxRowCount) @@ -114,7 +110,7 @@ func (ms *MySQL) GetVersion() string { query := "SELECT VERSION()" err := ms.db.QueryRow(query).Scan(&version) if err != nil { - utils.ErrExit("run query %q on source: %s", query, err) + utils.ErrExit("run query: %q on source: %s", query, err) } ms.source.DBVersion = version return version @@ -375,10 +371,6 @@ func (ms *MySQL) ParentTableOfPartition(table sqlname.NameTuple) string { panic("not implemented") } -func (ms *MySQL) ValidateTablesReadyForLiveMigration(tableList []sqlname.NameTuple) error { - panic("not implemented") -} - /* Only valid case is when the table has a auto increment column Note: a mysql table can have only one auto increment column @@ -396,7 +388,7 @@ func (ms *MySQL) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[strin var columnName string rows, err := ms.db.Query(query) if err != nil { - utils.ErrExit("Failed to query %q for auto increment column of %q: %s", query, table.String(), err) + utils.ErrExit("Failed to query for auto increment column: query:%q table: %q: %s", query, table.String(), err) } defer func() { closeErr := rows.Close() @@ -407,7 +399,7 @@ func (ms *MySQL) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[strin if rows.Next() { err = rows.Scan(&columnName) if err != nil { - utils.ErrExit("Failed to scan %q for auto increment column of %q: %s", query, table.String(), err) + utils.ErrExit("Failed to scan for auto increment column: query: %q table: %q: %s", query, table.String(), err) } qualifiedColumeName := fmt.Sprintf("%s.%s", table.AsQualifiedCatalogName(), columnName) // sequence name as per PG naming convention for bigserial datatype's sequence @@ -416,7 +408,7 @@ func (ms *MySQL) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[strin } err = rows.Close() if err 
!= nil { - utils.ErrExit("close rows for table %s query %q: %s", table.String(), query, err) + utils.ErrExit("close rows for table: %s query %q: %s", table.String(), query, err) } } return columnToSequenceMap @@ -534,11 +526,11 @@ func (ms *MySQL) CheckSourceDBVersion(exportType string) error { return nil } -func (ms *MySQL) GetMissingExportSchemaPermissions() ([]string, error) { +func (ms *MySQL) GetMissingExportSchemaPermissions(queryTableList string) ([]string, error) { return nil, nil } -func (ms *MySQL) GetMissingExportDataPermissions(exportType string) ([]string, error) { +func (ms *MySQL) GetMissingExportDataPermissions(exportType string, finalTableList []sqlname.NameTuple) ([]string, error) { return nil, nil } @@ -546,6 +538,10 @@ func (ms *MySQL) CheckIfReplicationSlotsAreAvailable() (isAvailable bool, usedCo return false, 0, 0, nil } -func (ms *MySQL) GetMissingAssessMigrationPermissions() ([]string, error) { +func (ms *MySQL) GetMissingAssessMigrationPermissions() ([]string, bool, error) { + return nil, false, nil +} + +func (ms *MySQL) GetSchemasMissingUsagePermissions() ([]string, error) { return nil, nil } diff --git a/yb-voyager/src/srcdb/mysql_test.go b/yb-voyager/src/srcdb/mysql_test.go new file mode 100644 index 0000000000..db74724162 --- /dev/null +++ b/yb-voyager/src/srcdb/mysql_test.go @@ -0,0 +1,89 @@ +//go:build integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package srcdb + +import ( + "testing" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" + "gotest.tools/assert" +) + +func TestMysqlGetAllTableNames(t *testing.T) { + testMySQLSource.ExecuteSqls( + `CREATE DATABASE test;`, + `CREATE TABLE test.foo ( + id INT PRIMARY KEY, + name VARCHAR(255) + );`, + `CREATE TABLE test.bar ( + id INT PRIMARY KEY, + name VARCHAR(255) + );`, + `CREATE TABLE test.non_pk1( + id INT, + name VARCHAR(255) + );`) + defer testMySQLSource.ExecuteSqls(`DROP DATABASE test;`) + + sqlname.SourceDBType = "mysql" + testMySQLSource.Source.DBName = "test" // used in query of GetAllTableNames() + + // Test GetAllTableNames + actualTables := testMySQLSource.DB().GetAllTableNames() + expectedTables := []*sqlname.SourceName{ + sqlname.NewSourceName("test", "foo"), + sqlname.NewSourceName("test", "bar"), + sqlname.NewSourceName("test", "non_pk1"), + } + assert.Equal(t, len(expectedTables), len(actualTables), "Expected number of tables to match") + + testutils.AssertEqualSourceNameSlices(t, expectedTables, actualTables) +} + +// TODO: Seems like a bug somewhere, because mysql.GetAllNonPkTables() is currently returning all the tables created in this test +// func TestMySQLGetNonPKTables(t *testing.T) { +// testMySQLSource.ExecuteSqls( +// `CREATE DATABASE test;`, +// `CREATE TABLE test.table1 ( +// id INT AUTO_INCREMENT PRIMARY KEY, +// name VARCHAR(100) +// );`, +// `CREATE TABLE test.table2 ( +// id INT AUTO_INCREMENT PRIMARY KEY, +// email VARCHAR(100) +// );`, +// `CREATE TABLE test.non_pk1( +// id INT, +// name VARCHAR(255) +// );`, +// `CREATE TABLE test.non_pk2( +// id INT, +// name VARCHAR(255) +// );`) +// defer testMySQLSource.ExecuteSqls(`DROP DATABASE test;`) + +// testMySQLSource.Source.DBName = "test" +// actualTables, err := testMySQLSource.DB().GetNonPKTables() +// assert.NilError(t, err, "Expected nil but non nil error: %v", err) + +// expectedTables = 
[]string{"test.non_pk1", "test.non_pk2"} + +// testutils.AssertEqualStringSlices(t, expectedTables, actualTables) +// } diff --git a/yb-voyager/src/srcdb/ora2pg.go b/yb-voyager/src/srcdb/ora2pg.go index 98d8272d68..bc299a9c5d 100644 --- a/yb-voyager/src/srcdb/ora2pg.go +++ b/yb-voyager/src/srcdb/ora2pg.go @@ -113,7 +113,7 @@ func populateOra2pgConfigFile(configFilePath string, conf *Ora2pgConfig) { err = os.WriteFile(configFilePath, output.Bytes(), 0644) if err != nil { - utils.ErrExit("unable to update config file %q: %v\n", configFilePath, err) + utils.ErrExit("unable to update config file: %q: %v\n", configFilePath, err) } } diff --git a/yb-voyager/src/srcdb/ora2pg_export_data.go b/yb-voyager/src/srcdb/ora2pg_export_data.go index 4357b4fa9e..d0f6dfe6e2 100644 --- a/yb-voyager/src/srcdb/ora2pg_export_data.go +++ b/yb-voyager/src/srcdb/ora2pg_export_data.go @@ -118,7 +118,7 @@ func getIdentityColumnSequences(exportDir string) []string { filePath := filepath.Join(exportDir, "data", "postdata.sql") bytes, err := os.ReadFile(filePath) if err != nil { - utils.ErrExit("unable to read file %q: %v\n", filePath, err) + utils.ErrExit("unable to read file: %q: %v\n", filePath, err) } lines := strings.Split(string(bytes), "\n") @@ -137,7 +137,7 @@ func replaceAllIdentityColumns(exportDir string, sourceTargetIdentitySequenceNam filePath := filepath.Join(exportDir, "data", "postdata.sql") bytes, err := os.ReadFile(filePath) if err != nil { - utils.ErrExit("unable to read file %q: %v\n", filePath, err) + utils.ErrExit("unable to read file: %q: %v\n", filePath, err) } lines := strings.Split(string(bytes), "\n") @@ -160,7 +160,7 @@ func replaceAllIdentityColumns(exportDir string, sourceTargetIdentitySequenceNam err = os.WriteFile(filePath, bytesToWrite, 0644) if err != nil { - utils.ErrExit("unable to write file %q: %v\n", filePath, err) + utils.ErrExit("unable to write file: %q: %v\n", filePath, err) } } @@ -180,7 +180,7 @@ func 
renameDataFilesForReservedWords(tablesProgressMetadata map[string]*utils.Ta log.Infof("Renaming %q -> %q", oldFilePath, newFilePath) err := os.Rename(oldFilePath, newFilePath) if err != nil { - utils.ErrExit("renaming data file for table %q after data export: %v", tblNameQuoted, err) + utils.ErrExit("renaming data file for table after data export: %q: %v", tblNameQuoted, err) } tableProgressMetadata.FinalFilePath = newFilePath } else { @@ -214,7 +214,7 @@ func getOra2pgExportedColumnsListForTable(exportDir, tableName, filePath string) return false // stop reading file }) if err != nil { - utils.ErrExit("error in reading file %q: %v", filePath, err) + utils.ErrExit("error in reading file: %q: %v", filePath, err) } log.Infof("columns list for table %s: %v", tableName, columnsList) return columnsList diff --git a/yb-voyager/src/srcdb/oracle.go b/yb-voyager/src/srcdb/oracle.go index 2af042e9cf..32de02bfa7 100644 --- a/yb-voyager/src/srcdb/oracle.go +++ b/yb-voyager/src/srcdb/oracle.go @@ -47,7 +47,7 @@ func newOracle(s *Source) *Oracle { func (ora *Oracle) Connect() error { db, err := sql.Open("godror", ora.getConnectionUri()) - db.SetMaxOpenConns(1) + db.SetMaxOpenConns(ora.source.NumConnections) db.SetConnMaxIdleTime(5 * time.Minute) ora.db = db return err @@ -73,26 +73,22 @@ func (ora *Oracle) CheckSchemaExists() bool { if err == sql.ErrNoRows { return false } else if err != nil { - utils.ErrExit("error in querying source database for schema %q: %v\n", schemaName, err) + utils.ErrExit("error in querying source database for schema: %q: %v\n", schemaName, err) } return true } -func (ora *Oracle) CheckRequiredToolsAreInstalled() { - checkTools("ora2pg", "sqlplus") -} - -func (ora *Oracle) GetTableRowCount(tableName sqlname.NameTuple) int64 { +func (ora *Oracle) GetTableRowCount(tableName sqlname.NameTuple) (int64, error) { var rowCount int64 query := fmt.Sprintf("select count(*) from %s", tableName.ForUserQuery()) log.Infof("Querying row count of table %q", tableName) 
err := ora.db.QueryRow(query).Scan(&rowCount) if err != nil { - utils.ErrExit("Failed to query %q for row count of %q: %s", query, tableName, err) + return 0, fmt.Errorf("query %q for row count of %q: %w", query, tableName, err) } log.Infof("Table %q has %v rows.", tableName, rowCount) - return rowCount + return rowCount, nil } func (ora *Oracle) GetTableApproxRowCount(tableName sqlname.NameTuple) int64 { @@ -105,7 +101,7 @@ func (ora *Oracle) GetTableApproxRowCount(tableName sqlname.NameTuple) int64 { log.Infof("Querying '%s' approx row count of table %q", query, tableName.String()) err := ora.db.QueryRow(query).Scan(&approxRowCount) if err != nil { - utils.ErrExit("Failed to query %q for approx row count of %q: %s", query, tableName.String(), err) + utils.ErrExit("Failed to query: %q for approx row count of %q: %s", query, tableName.String(), err) } log.Infof("Table %q has approx %v rows.", tableName.String(), approxRowCount) @@ -122,7 +118,7 @@ func (ora *Oracle) GetVersion() string { // query sample output: Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production err := ora.db.QueryRow(query).Scan(&version) if err != nil { - utils.ErrExit("run query %q on source: %s", query, err) + utils.ErrExit("run query: %q on source: %s", query, err) } ora.source.DBVersion = version return version @@ -329,13 +325,13 @@ func (ora *Oracle) FilterUnsupportedTables(migrationUUID uuid.UUID, tableList [] log.Infof("query for queue tables: %q\n", query) rows, err := ora.db.Query(query) if err != nil { - utils.ErrExit("failed to query %q for filtering unsupported queue tables: %v", query, err) + utils.ErrExit("failed to query for filtering unsupported queue tables:%q: %v", query, err) } for rows.Next() { var tableName string err := rows.Scan(&tableName) if err != nil { - utils.ErrExit("failed to scan tableName from output of query %q: %v", query, err) + utils.ErrExit("failed to scan tableName from output of query: %q: %v", query, err) } tableName = fmt.Sprintf(`"%s"`, 
tableName) tableSrcName := sqlname.NewSourceName(ora.source.Schema, tableName) @@ -398,7 +394,7 @@ func (ora *Oracle) IsNestedTable(tableName sqlname.NameTuple) bool { isNestedTable := 0 err := ora.db.QueryRow(query).Scan(&isNestedTable) if err != nil && err != sql.ErrNoRows { - utils.ErrExit("error in query to check if table %v is a nested table: %v", tableName, err) + utils.ErrExit("check if table is a nested table: %v: %v", tableName, err) } return isNestedTable == 1 } @@ -411,7 +407,7 @@ func (ora *Oracle) IsParentOfNestedTable(tableName sqlname.NameTuple) bool { isParentNestedTable := 0 err := ora.db.QueryRow(query).Scan(&isParentNestedTable) if err != nil && err != sql.ErrNoRows { - utils.ErrExit("error in query to check if table %v is parent of nested table: %v", tableName, err) + utils.ErrExit("check if table is parent of nested table: %v: %v", tableName, err) } return isParentNestedTable == 1 } @@ -424,7 +420,7 @@ func (ora *Oracle) GetTargetIdentityColumnSequenceName(sequenceName string) stri if err == sql.ErrNoRows { return "" } else if err != nil { - utils.ErrExit("failed to query %q for finding identity sequence table and column: %v", query, err) + utils.ErrExit("failed to query for finding identity sequence table and column: %q: %v", query, err) } return fmt.Sprintf("%s_%s_seq", tableName, columnName) @@ -434,10 +430,6 @@ func (ora *Oracle) ParentTableOfPartition(table sqlname.NameTuple) string { panic("not implemented") } -func (ora *Oracle) ValidateTablesReadyForLiveMigration(tableList []sqlname.NameTuple) error { - panic("not implemented") -} - /* GetColumnToSequenceMap returns a map of column name to sequence name for all identity columns in the given list of tables. 
Note: There can be only one identity column per table in Oracle @@ -450,7 +442,7 @@ func (ora *Oracle) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[str query := fmt.Sprintf("SELECT column_name FROM all_tab_identity_cols WHERE owner = '%s' AND table_name = '%s'", sname, tname) rows, err := ora.db.Query(query) if err != nil { - utils.ErrExit("failed to query %q for finding identity column: %v", query, err) + utils.ErrExit("failed to query for finding identity column: %q: %v", query, err) } defer func() { closeErr := rows.Close() @@ -462,14 +454,14 @@ func (ora *Oracle) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[str var columnName string err := rows.Scan(&columnName) if err != nil { - utils.ErrExit("failed to scan columnName from output of query %q: %v", query, err) + utils.ErrExit("failed to scan columnName from output of query: %q: %v", query, err) } qualifiedColumnName := fmt.Sprintf("%s.%s", table.AsQualifiedCatalogName(), columnName) columnToSequenceMap[qualifiedColumnName] = fmt.Sprintf("%s_%s_seq", tname, columnName) } err = rows.Close() if err != nil { - utils.ErrExit("close rows for table %s query %q: %s", table.String(), query, err) + utils.ErrExit("close rows for table: %s query %q: %s", table.String(), query, err) } } @@ -717,18 +709,22 @@ func (ora *Oracle) CheckSourceDBVersion(exportType string) error { return nil } -func (ora *Oracle) GetMissingExportSchemaPermissions() ([]string, error) { +func (ora *Oracle) GetMissingExportSchemaPermissions(queryTableList string) ([]string, error) { return nil, nil } -func (ora *Oracle) GetMissingExportDataPermissions(exportType string) ([]string, error) { +func (ora *Oracle) GetMissingExportDataPermissions(exportType string, finalTableList []sqlname.NameTuple) ([]string, error) { return nil, nil } -func (ora *Oracle) GetMissingAssessMigrationPermissions() ([]string, error) { - return nil, nil +func (ora *Oracle) GetMissingAssessMigrationPermissions() ([]string, bool, error) { + return nil, 
false, nil } func (ora *Oracle) CheckIfReplicationSlotsAreAvailable() (isAvailable bool, usedCount int, maxCount int, err error) { return false, 0, 0, nil } + +func (ora *Oracle) GetSchemasMissingUsagePermissions() ([]string, error) { + return nil, nil +} diff --git a/yb-voyager/src/srcdb/oracle_test.go b/yb-voyager/src/srcdb/oracle_test.go new file mode 100644 index 0000000000..9369b82575 --- /dev/null +++ b/yb-voyager/src/srcdb/oracle_test.go @@ -0,0 +1,80 @@ +//go:build integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package srcdb + +import ( + "testing" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" + "gotest.tools/assert" +) + +func TestOracleGetAllTableNames(t *testing.T) { + sqlname.SourceDBType = "oracle" + + // Test GetAllTableNames + actualTables := testOracleSource.DB().GetAllTableNames() + expectedTables := []*sqlname.SourceName{ + sqlname.NewSourceName("YBVOYAGER", "foo"), + sqlname.NewSourceName("YBVOYAGER", "bar"), + sqlname.NewSourceName("YBVOYAGER", "table1"), + sqlname.NewSourceName("YBVOYAGER", "table2"), + sqlname.NewSourceName("YBVOYAGER", "unique_table"), + sqlname.NewSourceName("YBVOYAGER", "non_pk1"), + sqlname.NewSourceName("YBVOYAGER", "non_pk2"), + } + assert.Equal(t, len(expectedTables), len(actualTables), "Expected number of tables to match") + + testutils.AssertEqualSourceNameSlices(t, expectedTables, actualTables) +} + +func TestOracleGetTableToUniqueKeyColumnsMap(t *testing.T) { + objectName := sqlname.NewObjectName("oracle", "YBVOYAGER", "YBVOYAGER", "UNIQUE_TABLE") + + // Test GetTableToUniqueKeyColumnsMap + tableList := []sqlname.NameTuple{ + {CurrentName: objectName}, + } + uniqueKeys, err := testOracleSource.DB().GetTableToUniqueKeyColumnsMap(tableList) + if err != nil { + t.Fatalf("Error retrieving unique keys: %v", err) + } + + expectedKeys := map[string][]string{ + "UNIQUE_TABLE": {"EMAIL", "PHONE", "ADDRESS"}, + } + + // Compare the maps by iterating over each table and asserting the columns list + for table, expectedColumns := range expectedKeys { + actualColumns, exists := uniqueKeys[table] + if !exists { + t.Errorf("Expected table %s not found in uniqueKeys", table) + } + + testutils.AssertEqualStringSlices(t, expectedColumns, actualColumns) + } +} + +func TestOracleGetNonPKTables(t *testing.T) { + actualTables, err := testOracleSource.DB().GetNonPKTables() + assert.NilError(t, err, "Expected nil but non nil error: %v", err) + + expectedTables := 
[]string{`YBVOYAGER."NON_PK1"`, `YBVOYAGER."NON_PK2"`} + testutils.AssertEqualStringSlices(t, expectedTables, actualTables) +} diff --git a/yb-voyager/src/srcdb/pg_dump_export_data.go b/yb-voyager/src/srcdb/pg_dump_export_data.go index a41cf186a4..d68871409d 100644 --- a/yb-voyager/src/srcdb/pg_dump_export_data.go +++ b/yb-voyager/src/srcdb/pg_dump_export_data.go @@ -108,7 +108,7 @@ func parseAndCreateTocTextFile(dataDirPath string) { parseTocFileCommand := exec.Command("strings", tocFilePath) cmdOutput, err := parseTocFileCommand.CombinedOutput() if err != nil { - utils.ErrExit("parsing tocfile %q: %v", tocFilePath, err) + utils.ErrExit("parsing tocfile: %q: %v", tocFilePath, err) } //Put the data into a toc.txt file @@ -146,7 +146,7 @@ func renameDataFiles(tablesProgressMetadata map[string]*utils.TableProgressMetad log.Infof("Renaming %q -> %q", oldFilePath, newFilePath) err := os.Rename(oldFilePath, newFilePath) if err != nil { - utils.ErrExit("renaming data file for table %q after data export: %v", tableProgressMetadata.TableName, err) + utils.ErrExit("renaming data file: for table %q after data export: %v", tableProgressMetadata.TableName, err) } } else { log.Infof("File %q to rename doesn't exists!", oldFilePath) diff --git a/yb-voyager/src/srcdb/pg_dump_extract_schema.go b/yb-voyager/src/srcdb/pg_dump_extract_schema.go index 71c09c433a..20e5158839 100644 --- a/yb-voyager/src/srcdb/pg_dump_extract_schema.go +++ b/yb-voyager/src/srcdb/pg_dump_extract_schema.go @@ -70,7 +70,7 @@ func pgdumpExtractSchema(source *Source, connectionUri string, exportDir string, func readSchemaFile(path string) []string { file, err := os.Open(path) if err != nil { - utils.ErrExit("error in opening schema file %s: %v", path, err) + utils.ErrExit("error in opening schema file: %s: %v", path, err) } defer file.Close() var lines []string @@ -83,7 +83,7 @@ func readSchemaFile(path string) []string { } if scanner.Err() != nil { - utils.ErrExit("error in reading schema file %s: %v", path, 
scanner.Err()) + utils.ErrExit("error in reading schema file: %s: %v", path, scanner.Err()) } return lines diff --git a/yb-voyager/src/srcdb/postgres.go b/yb-voyager/src/srcdb/postgres.go index 21d4cd2813..48406e6abb 100644 --- a/yb-voyager/src/srcdb/postgres.go +++ b/yb-voyager/src/srcdb/postgres.go @@ -42,15 +42,16 @@ import ( const MIN_SUPPORTED_PG_VERSION_OFFLINE = "9" const MIN_SUPPORTED_PG_VERSION_LIVE = "10" -const MAX_SUPPORTED_PG_VERSION = "16" +const MAX_SUPPORTED_PG_VERSION = "17" const MISSING = "MISSING" const GRANTED = "GRANTED" const NO_USAGE_PERMISSION = "NO USAGE PERMISSION" +const PG_STAT_STATEMENTS = "pg_stat_statements" -var pg_catalog_tables_required = []string{"regclass", "pg_class", "pg_inherits", "setval", "pg_index", "pg_relation_size", "pg_namespace", "pg_tables", "pg_sequences", "pg_roles", "pg_database"} +var pg_catalog_tables_required = []string{"regclass", "pg_class", "pg_inherits", "setval", "pg_index", "pg_relation_size", "pg_namespace", "pg_tables", "pg_sequences", "pg_roles", "pg_database", "pg_extension"} var information_schema_tables_required = []string{"schemata", "tables", "columns", "key_column_usage", "sequences"} -var PostgresUnsupportedDataTypes = []string{"GEOMETRY", "GEOGRAPHY", "BOX2D", "BOX3D", "TOPOGEOMETRY", "RASTER", "PG_LSN", "TXID_SNAPSHOT", "XML", "XID"} -var PostgresUnsupportedDataTypesForDbzm = []string{"POINT", "LINE", "LSEG", "BOX", "PATH", "POLYGON", "CIRCLE", "GEOMETRY", "GEOGRAPHY", "BOX2D", "BOX3D", "TOPOGEOMETRY", "RASTER", "PG_LSN", "TXID_SNAPSHOT", "XML"} +var PostgresUnsupportedDataTypes = []string{"GEOMETRY", "GEOGRAPHY", "BOX2D", "BOX3D", "TOPOGEOMETRY", "RASTER", "PG_LSN", "TXID_SNAPSHOT", "XML", "XID", "LO", "INT4MULTIRANGE", "INT8MULTIRANGE", "NUMMULTIRANGE", "TSMULTIRANGE", "TSTZMULTIRANGE", "DATEMULTIRANGE"} +var PostgresUnsupportedDataTypesForDbzm = []string{"POINT", "LINE", "LSEG", "BOX", "PATH", "POLYGON", "CIRCLE", "GEOMETRY", "GEOGRAPHY", "BOX2D", "BOX3D", "TOPOGEOMETRY", "RASTER", 
"PG_LSN", "TXID_SNAPSHOT", "XML", "LO", "INT4MULTIRANGE", "INT8MULTIRANGE", "NUMMULTIRANGE", "TSMULTIRANGE", "TSTZMULTIRANGE", "DATEMULTIRANGE"} func GetPGLiveMigrationUnsupportedDatatypes() []string { liveMigrationUnsupportedDataTypes, _ := lo.Difference(PostgresUnsupportedDataTypesForDbzm, PostgresUnsupportedDataTypes) @@ -109,7 +110,7 @@ func newPostgreSQL(s *Source) *PostgreSQL { func (pg *PostgreSQL) Connect() error { db, err := sql.Open("pgx", pg.getConnectionUri()) - db.SetMaxOpenConns(1) + db.SetMaxOpenConns(pg.source.NumConnections) db.SetConnMaxIdleTime(5 * time.Minute) pg.db = db return err @@ -139,20 +140,16 @@ func (pg *PostgreSQL) getTrimmedSchemaList() []string { return trimmedList } -func (pg *PostgreSQL) CheckRequiredToolsAreInstalled() { - checkTools("strings") -} - -func (pg *PostgreSQL) GetTableRowCount(tableName sqlname.NameTuple) int64 { +func (pg *PostgreSQL) GetTableRowCount(tableName sqlname.NameTuple) (int64, error) { var rowCount int64 query := fmt.Sprintf("select count(*) from %s", tableName.ForUserQuery()) log.Infof("Querying row count of table %q", tableName) err := pg.db.QueryRow(query).Scan(&rowCount) if err != nil { - utils.ErrExit("Failed to query %q for row count of %q: %s", query, tableName, err) + return 0, fmt.Errorf("query %q for row count of %q: %w", query, tableName, err) } log.Infof("Table %q has %v rows.", tableName, rowCount) - return rowCount + return rowCount, nil } func (pg *PostgreSQL) GetTableApproxRowCount(tableName sqlname.NameTuple) int64 { @@ -163,7 +160,7 @@ func (pg *PostgreSQL) GetTableApproxRowCount(tableName sqlname.NameTuple) int64 log.Infof("Querying '%s' approx row count of table %q", query, tableName.String()) err := pg.db.QueryRow(query).Scan(&approxRowCount) if err != nil { - utils.ErrExit("Failed to query %q for approx row count of %q: %s", query, tableName.String(), err) + utils.ErrExit("Failed to query for approx row count of table: %q: %q: %s", tableName.String(), query, err) } log.Infof("Table %q 
has approx %v rows.", tableName.String(), approxRowCount) @@ -198,7 +195,7 @@ func (pg *PostgreSQL) checkSchemasExists() []string { WHERE nspname IN (%s);`, querySchemaList) rows, err := pg.db.Query(chkSchemaExistsQuery) if err != nil { - utils.ErrExit("error in querying(%q) source database for checking mentioned schema(s) present or not: %v\n", chkSchemaExistsQuery, err) + utils.ErrExit("error in querying source database for checking mentioned schema(s) present or not: %q: %v\n", chkSchemaExistsQuery, err) } defer func() { closeErr := rows.Close() @@ -219,16 +216,25 @@ func (pg *PostgreSQL) checkSchemasExists() []string { schemaNotPresent := utils.SetDifference(trimmedSchemaList, listOfSchemaPresent) if len(schemaNotPresent) > 0 { - utils.ErrExit("Following schemas are not present in source database %v, please provide a valid schema list.\n", schemaNotPresent) + utils.ErrExit("Following schemas are not present in source database: %v, please provide a valid schema list.\n", schemaNotPresent) } return trimmedSchemaList } func (pg *PostgreSQL) GetAllTableNamesRaw(schemaName string) ([]string, error) { - query := fmt.Sprintf(`SELECT table_name - FROM information_schema.tables - WHERE table_type = 'BASE TABLE' AND - table_schema = '%s';`, schemaName) + // Information schema requires select permission on the tables to query the tables. However, pg_catalog does not require any permission. + // So, we are using pg_catalog to get the table names. 
+ query := fmt.Sprintf(` + SELECT + c.relname AS table_name + FROM + pg_catalog.pg_class c + JOIN + pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE + c.relkind IN ('r', 'p') -- 'r' for regular tables, 'p' for partitioned tables + AND n.nspname = '%s'; + `, schemaName) rows, err := pg.db.Query(query) if err != nil { @@ -258,14 +264,24 @@ func (pg *PostgreSQL) GetAllTableNamesRaw(schemaName string) ([]string, error) { func (pg *PostgreSQL) GetAllTableNames() []*sqlname.SourceName { schemaList := pg.checkSchemasExists() querySchemaList := "'" + strings.Join(schemaList, "','") + "'" - query := fmt.Sprintf(`SELECT table_schema, table_name - FROM information_schema.tables - WHERE table_type = 'BASE TABLE' AND - table_schema IN (%s);`, querySchemaList) + // Information schema requires select permission on the tables to query the tables. However, pg_catalog does not require any permission. + // So, we are using pg_catalog to get the table names. + query := fmt.Sprintf(` + SELECT + n.nspname AS table_schema, + c.relname AS table_name + FROM + pg_catalog.pg_class c + JOIN + pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE + c.relkind IN ('r', 'p') -- 'r' for regular tables, 'p' for partitioned tables + AND n.nspname IN (%s); + `, querySchemaList) rows, err := pg.db.Query(query) if err != nil { - utils.ErrExit("error in querying(%q) source database for table names: %v\n", query, err) + utils.ErrExit("error in querying source database for table names: %q: %v\n", query, err) } defer func() { closeErr := rows.Close() @@ -451,7 +467,7 @@ func (pg *PostgreSQL) GetAllSequences() []string { query := fmt.Sprintf(`SELECT sequence_schema, sequence_name FROM information_schema.sequences where sequence_schema IN (%s);`, querySchemaList) rows, err := pg.db.Query(query) if err != nil { - utils.ErrExit("error in querying(%q) source database for sequence names: %v\n", query, err) + utils.ErrExit("error in querying source database for sequence names: %q: %v\n", query, 
err) } defer func() { closeErr := rows.Close() @@ -577,7 +593,7 @@ func (pg *PostgreSQL) FilterEmptyTables(tableList []sqlname.NameTuple) ([]sqlnam if err == sql.ErrNoRows { empty = true } else { - utils.ErrExit("error in querying table %v: %v", tableName, err) + utils.ErrExit("error in querying table LIMIT 1: %v: %v", tableName, err) } } if !empty { @@ -654,14 +670,23 @@ func (pg *PostgreSQL) GetColumnsWithSupportedTypes(tableList []sqlname.NameTuple func (pg *PostgreSQL) ParentTableOfPartition(table sqlname.NameTuple) string { var parentTable string + // For this query in case of case sensitive tables, minquoting is required - query := fmt.Sprintf(`SELECT inhparent::pg_catalog.regclass - FROM pg_catalog.pg_class c JOIN pg_catalog.pg_inherits ON c.oid = inhrelid - WHERE c.oid = '%s'::regclass::oid`, table.ForOutput()) + query := fmt.Sprintf(`SELECT + inhparent::pg_catalog.regclass AS parent_table + FROM + pg_catalog.pg_inherits + JOIN + pg_catalog.pg_class AS child ON pg_inherits.inhrelid = child.oid + JOIN + pg_catalog.pg_namespace AS nsp_child ON child.relnamespace = nsp_child.oid + WHERE + child.relname = '%s' + AND nsp_child.nspname = '%s';`, table.CurrentName.Unqualified.MinQuoted, table.CurrentName.SchemaName) err := pg.db.QueryRow(query).Scan(&parentTable) if err != sql.ErrNoRows && err != nil { - utils.ErrExit("Error in query=%s for parent tablename of table=%s: %v", query, table, err) + utils.ErrExit("Error in query: %s for parent tablename of table=%s: %v", query, table, err) } return parentTable @@ -679,7 +704,7 @@ func (pg *PostgreSQL) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[ rows, err := pg.db.Query(query) if err != nil { log.Infof("Query to find column to sequence mapping: %s", query) - utils.ErrExit("Error in querying for sequences in table=%s: %v", table, err) + utils.ErrExit("Error in querying for sequences in table: %s: %v", table, err) } defer func() { closeErr := rows.Close() @@ -690,7 +715,7 @@ func (pg *PostgreSQL) 
GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[ for rows.Next() { err := rows.Scan(&columeName, &sequenceName, &schemaName) if err != nil { - utils.ErrExit("Error in scanning for sequences in table=%s: %v", table, err) + utils.ErrExit("Error in scanning for sequences in table: %s: %v", table, err) } qualifiedColumnName := fmt.Sprintf("%s.%s", table.AsQualifiedCatalogName(), columeName) // quoting sequence name as it can be case sensitive - required during import data restore sequences @@ -698,7 +723,7 @@ func (pg *PostgreSQL) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[ } err = rows.Close() if err != nil { - utils.ErrExit("close rows for table %s query %q: %s", table.String(), query, err) + utils.ErrExit("close rows for table: %s query %q: %s", table.String(), query, err) } } @@ -760,7 +785,7 @@ WHERE parent.relname='%s' AND nmsp_parent.nspname = '%s' `, tname, sname) rows, err := pg.db.Query(query) if err != nil { log.Errorf("failed to list partitions of table %s: query = [ %s ], error = %s", tableName, query, err) - utils.ErrExit("failed to find the partitions for table %s:", tableName, err) + utils.ErrExit("failed to find the partitions for table: %s: %v", tableName, err) } defer func() { closeErr := rows.Close() @@ -772,12 +797,12 @@ WHERE parent.relname='%s' AND nmsp_parent.nspname = '%s' `, tname, sname) var childSchema, childTable string err := rows.Scan(&childSchema, &childTable) if err != nil { - utils.ErrExit("Error in scanning for child partitions of table=%s: %v", tableName, err) + utils.ErrExit("Error in scanning for child partitions of table: %s: %v", tableName, err) } partitions = append(partitions, fmt.Sprintf(`%s.%s`, childSchema, childTable)) } if rows.Err() != nil { - utils.ErrExit("Error in scanning for child partitions of table=%s: %v", tableName, rows.Err()) + utils.ErrExit("Error in scanning for child partitions of table: %s: %v", tableName, rows.Err()) } return partitions } @@ -915,6 +940,7 @@ var 
PG_QUERY_TO_CHECK_IF_TABLE_HAS_PK = `SELECT nspname AS schema_name, relname FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace LEFT JOIN pg_constraint con ON con.conrelid = c.oid AND con.contype = 'p' +WHERE c.relkind = 'r' OR c.relkind = 'p' -- Only consider table objects GROUP BY schema_name, table_name HAVING nspname IN (%s);` func (pg *PostgreSQL) GetNonPKTables() ([]string, error) { @@ -939,51 +965,15 @@ func (pg *PostgreSQL) GetNonPKTables() ([]string, error) { if err != nil { return nil, fmt.Errorf("error in scanning query rows for primary key: %v", err) } - table := sqlname.NewSourceName(schemaName, fmt.Sprintf(`"%s"`, tableName)) + if pkCount == 0 { + table := sqlname.NewSourceName(schemaName, fmt.Sprintf(`"%s"`, tableName)) nonPKTables = append(nonPKTables, table.Qualified.Quoted) } } return nonPKTables, nil } -func (pg *PostgreSQL) ValidateTablesReadyForLiveMigration(tableList []sqlname.NameTuple) error { - var tablesWithReplicaIdentityNotFull []string - var qualifiedTableNames []string - for _, table := range tableList { - sname, tname := table.ForCatalogQuery() - qualifiedTableNames = append(qualifiedTableNames, fmt.Sprintf("'%s.%s'", sname, tname)) - } - query := fmt.Sprintf(`SELECT n.nspname || '.' || c.relname AS table_name_with_schema - FROM pg_class AS c - JOIN pg_namespace AS n ON c.relnamespace = n.oid - WHERE (n.nspname || '.' 
|| c.relname) IN (%s) - AND c.relkind = 'r' - AND c.relreplident <> 'f';`, strings.Join(qualifiedTableNames, ",")) - rows, err := pg.db.Query(query) - if err != nil { - return fmt.Errorf("error in querying(%q) source database for replica identity: %v", query, err) - } - defer func() { - closeErr := rows.Close() - if closeErr != nil { - log.Warnf("close rows for query %q: %v", query, closeErr) - } - }() - for rows.Next() { - var tableWithSchema string - err := rows.Scan(&tableWithSchema) - if err != nil { - return fmt.Errorf("error in scanning query rows for replica identity: %v", err) - } - tablesWithReplicaIdentityNotFull = append(tablesWithReplicaIdentityNotFull, tableWithSchema) - } - if len(tablesWithReplicaIdentityNotFull) > 0 { - return fmt.Errorf("tables %v do not have REPLICA IDENTITY FULL\nPlease ALTER the tables and set their REPLICA IDENTITY to FULL", tablesWithReplicaIdentityNotFull) - } - return nil -} - // =============================== Guardrails =============================== func (pg *PostgreSQL) CheckSourceDBVersion(exportType string) error { @@ -991,14 +981,24 @@ func (pg *PostgreSQL) CheckSourceDBVersion(exportType string) error { if pgVersion == "" { return fmt.Errorf("failed to get source database version") } + + // Extract the major version from the full version string + // Version can be like: 17.2 (Ubuntu 17.2-1.pgdg20.04+1), 17.2, 17alpha1 etc. 
+ re := regexp.MustCompile(`^(\d+)`) + match := re.FindStringSubmatch(pgVersion) + if len(match) < 2 { + return fmt.Errorf("failed to extract major version from source database version: %s", pgVersion) + } + majorVersion := match[1] + supportedVersionRange := fmt.Sprintf("%s to %s", MIN_SUPPORTED_PG_VERSION_OFFLINE, MAX_SUPPORTED_PG_VERSION) - if version.CompareSimple(pgVersion, MAX_SUPPORTED_PG_VERSION) > 0 || version.CompareSimple(pgVersion, MIN_SUPPORTED_PG_VERSION_OFFLINE) < 0 { + if version.CompareSimple(majorVersion, MAX_SUPPORTED_PG_VERSION) > 0 || version.CompareSimple(majorVersion, MIN_SUPPORTED_PG_VERSION_OFFLINE) < 0 { return fmt.Errorf("current source db version: %s. Supported versions: %s", pgVersion, supportedVersionRange) } // for live migration if exportType == utils.CHANGES_ONLY || exportType == utils.SNAPSHOT_AND_CHANGES { - if version.CompareSimple(pgVersion, MIN_SUPPORTED_PG_VERSION_LIVE) < 0 { + if version.CompareSimple(majorVersion, MIN_SUPPORTED_PG_VERSION_LIVE) < 0 { supportedVersionRange = fmt.Sprintf("%s to %s", MIN_SUPPORTED_PG_VERSION_LIVE, MAX_SUPPORTED_PG_VERSION) utils.PrintAndLog(color.RedString("Warning: Live Migration: Current source db version: %s. Supported versions: %s", pgVersion, supportedVersionRange)) } @@ -1015,20 +1015,11 @@ Returns: - []string: A slice of strings describing the missing permissions, if any. - error: An error if any issues occur during the permission checks. 
*/ -func (pg *PostgreSQL) GetMissingExportSchemaPermissions() ([]string, error) { +func (pg *PostgreSQL) GetMissingExportSchemaPermissions(queryTableList string) ([]string, error) { var combinedResult []string - // Check if schemas have USAGE permission - missingSchemas, err := pg.listSchemasMissingUsagePermission() - if err != nil { - return nil, fmt.Errorf("error checking schema usage permissions: %w", err) - } - if len(missingSchemas) > 0 { - combinedResult = append(combinedResult, fmt.Sprintf("\n%s[%s]", color.RedString("Missing USAGE permission for user %s on Schemas: ", pg.source.User), strings.Join(missingSchemas, ", "))) - } - // Check if tables have SELECT permission - missingTables, err := pg.listTablesMissingSelectPermission() + missingTables, err := pg.listTablesMissingSelectPermission(queryTableList) if err != nil { return nil, fmt.Errorf("error checking table select permissions: %w", err) } @@ -1065,8 +1056,12 @@ The function performs the following checks: - Checks if the user has create permission on the database. - Checks if the user has ownership over all tables. 
*/ -func (pg *PostgreSQL) GetMissingExportDataPermissions(exportType string) ([]string, error) { +func (pg *PostgreSQL) GetMissingExportDataPermissions(exportType string, finalTableList []sqlname.NameTuple) ([]string, error) { var combinedResult []string + qualifiedMinQuotedTableNames := lo.Map(finalTableList, func(table sqlname.NameTuple, _ int) string { + return table.ForOutput() + }) + queryTableList := fmt.Sprintf("'%s'", strings.Join(qualifiedMinQuotedTableNames, "','")) // For live migration if exportType == utils.CHANGES_ONLY || exportType == utils.SNAPSHOT_AND_CHANGES { @@ -1101,26 +1096,17 @@ func (pg *PostgreSQL) GetMissingExportDataPermissions(exportType string) ([]stri combinedResult = append(combinedResult, fmt.Sprintf("\n%sCREATE on database %s", color.RedString("Missing permission for user "+pg.source.User+": "), pg.source.DBName)) } - // Check if schemas have USAGE permission - missingSchemas, err := pg.listSchemasMissingUsagePermission() + // Check replica identity of tables + missingTables, err := pg.listTablesMissingReplicaIdentityFull(queryTableList) if err != nil { - return nil, fmt.Errorf("error checking schema usage permissions: %w", err) + return nil, fmt.Errorf("error in checking table replica identity: %w", err) } - if len(missingSchemas) > 0 { - combinedResult = append(combinedResult, fmt.Sprintf("\n%s[%s]", color.RedString(fmt.Sprintf("Missing USAGE permission for user %s on Schemas: ", pg.source.User)), strings.Join(missingSchemas, ", "))) + if len(missingTables) > 0 { + combinedResult = append(combinedResult, fmt.Sprintf("\n%s[%s]", color.RedString("Tables missing replica identity full: "), strings.Join(missingTables, ", "))) } - // Check replica identity of tables - // missingTables, err := pg.listTablesMissingReplicaIdentityFull() - // if err != nil { - // return nil, fmt.Errorf("error in checking table replica identity: %w", err) - // } - // if len(missingTables) > 0 { - // combinedResult = append(combinedResult, 
fmt.Sprintf("\n%s[%s]", color.RedString("Tables missing replica identity full: "), strings.Join(missingTables, ", "))) - // } - // Check if user has ownership over all tables - missingTables, err := pg.listTablesMissingOwnerPermission() + missingTables, err = pg.listTablesMissingOwnerPermission(queryTableList) if err != nil { return nil, fmt.Errorf("error in checking table owner permissions: %w", err) } @@ -1139,7 +1125,7 @@ func (pg *PostgreSQL) GetMissingExportDataPermissions(exportType string) ([]stri } else { // For offline migration // Check if schemas have USAGE permission and check if tables in the provided schemas have SELECT permission - res, err := pg.GetMissingExportSchemaPermissions() + res, err := pg.GetMissingExportSchemaPermissions(queryTableList) if err != nil { return nil, fmt.Errorf("error in getting missing export data permissions: %w", err) } @@ -1158,28 +1144,97 @@ func (pg *PostgreSQL) GetMissingExportDataPermissions(exportType string) ([]stri return combinedResult, nil } -func (pg *PostgreSQL) GetMissingAssessMigrationPermissions() ([]string, error) { +func (pg *PostgreSQL) GetMissingAssessMigrationPermissions() ([]string, bool, error) { var combinedResult []string - // Check if schemas have USAGE permission - missingSchemas, err := pg.listSchemasMissingUsagePermission() + // Check if tables have SELECT permission + missingTables, err := pg.listTablesMissingSelectPermission("") if err != nil { - return nil, fmt.Errorf("error checking schema usage permissions: %w", err) + return nil, false, fmt.Errorf("error checking table select permissions: %w", err) } - if len(missingSchemas) > 0 { - combinedResult = append(combinedResult, fmt.Sprintf("\n%s[%s]", color.RedString("Missing USAGE permission for user %s on Schemas: ", pg.source.User), strings.Join(missingSchemas, ", "))) + if len(missingTables) > 0 { + combinedResult = append(combinedResult, fmt.Sprintf("\n%s[%s]", color.RedString("Missing SELECT permission for user %s on Tables: ", 
pg.source.User), strings.Join(missingTables, ", "))) } - // Check if tables have SELECT permission - missingTables, err := pg.listTablesMissingSelectPermission() + result, err := pg.checkPgStatStatementsSetup() if err != nil { - return nil, fmt.Errorf("error checking table select permissions: %w", err) + return nil, false, fmt.Errorf("error checking pg_stat_statement extension installed with read permissions: %w", err) } - if len(missingTables) > 0 { - combinedResult = append(combinedResult, fmt.Sprintf("\n%s[%s]", color.RedString("Missing SELECT permission for user %s on Tables: ", pg.source.User), strings.Join(missingTables, ", "))) + + pgssEnabled := true + if result != "" { + pgssEnabled = false + combinedResult = append(combinedResult, result) } + return combinedResult, pgssEnabled, nil +} - return combinedResult, nil +const ( + queryPgStatStatementsSchema = ` + SELECT nspname + FROM pg_extension e + JOIN pg_namespace n ON e.extnamespace = n.oid + WHERE e.extname = 'pg_stat_statements'` + + queryHasReadStatsPermission = ` + SELECT pg_has_role(current_user, 'pg_read_all_stats', 'USAGE')` + + SHARED_PRELOAD_LIBRARY_ERROR = "pg_stat_statements must be loaded via shared_preload_libraries" +) + +// checkPgStatStatementsSetup checks if pg_stat_statements is properly installed and if the user has the necessary read permissions. +func (pg *PostgreSQL) checkPgStatStatementsSetup() (string, error) { + if !utils.GetEnvAsBool("REPORT_UNSUPPORTED_QUERY_CONSTRUCTS", true) { + log.Infof("REPORT_UNSUPPORTED_QUERY_CONSTRUCTS is set as false, skipping guardrails check for pg_stat_statements") + return "", nil + } + + // 1. 
check if pg_stat_statements extension is available on source + var pgssExtSchema string + err := pg.db.QueryRow(queryPgStatStatementsSchema).Scan(&pgssExtSchema) + if err != nil && err != sql.ErrNoRows { + if err == sql.ErrNoRows { + return "pg_stat_statements extension is not installed on source DB, required for detecting Unsupported Query Constructs", nil + } + return "", fmt.Errorf("failed to fetch the schema of pg_stat_statements available in: %w", err) + } + + if pgssExtSchema == "" { + return "pg_stat_statements extension is not installed on source DB, required for detecting Unsupported Query Constructs", nil + } else { + schemaList := lo.Union(pg.getTrimmedSchemaList(), []string{"public"}) + log.Infof("comparing schema list %v against pgss extension schema '%s'", schemaList, pgssExtSchema) + if !slices.Contains(schemaList, pgssExtSchema) { + return fmt.Sprintf("pg_stat_statements extension schema %q is not in the schema list (%s), required for detecting Unsupported Query Constructs", + pgssExtSchema, strings.Join(schemaList, ", ")), nil + } + } + + // 2. User has permission to read from pg_stat_statements table + var hasReadAllStats bool + err = pg.db.QueryRow(queryHasReadStatsPermission).Scan(&hasReadAllStats) + if err != nil { + return "", fmt.Errorf("failed to check pg_read_all_stats grant on migration user: %w", err) + } + + if !hasReadAllStats { + return "\n" + color.RedString("Missing Permission:") + " User doesn't have the `pg_read_all_stats` grant, required for detecting Unsupported Query Constructs", nil + } + + // To access "shared_preload_libraries" must be superuser or a member of pg_read_all_settings + // so instead of getting current_settings(), executing SELECT query on pg_stat_statements view + // 3. 
check if its properly installed/loaded without any extra permissions + queryCheckPgssLoaded := fmt.Sprintf("SELECT 1 from %s.pg_stat_statements LIMIT 1", pgssExtSchema) + log.Infof("query to check pgss is properly loaded - [%s]", queryCheckPgssLoaded) + _, err = pg.db.Exec(queryCheckPgssLoaded) + if err != nil { + if strings.Contains(err.Error(), SHARED_PRELOAD_LIBRARY_ERROR) { + return "pg_stat_statements is not loaded via shared_preload_libraries, required for detecting Unsupported Query Constructs", nil + } + return "", fmt.Errorf("failed to check pg_stat_statements is loaded via shared_preload_libraries: %w", err) + } + + return "", nil } func (pg *PostgreSQL) isMigrationUserASuperUser() (bool, error) { @@ -1207,10 +1262,7 @@ func (pg *PostgreSQL) isMigrationUserASuperUser() (bool, error) { return isSuperUser, nil } -func (pg *PostgreSQL) listTablesMissingOwnerPermission() ([]string, error) { - trimmedSchemaList := pg.getTrimmedSchemaList() - querySchemaList := "'" + strings.Join(trimmedSchemaList, "','") + "'" - +func (pg *PostgreSQL) listTablesMissingOwnerPermission(queryTableList string) ([]string, error) { checkTableOwnerPermissionQuery := fmt.Sprintf(` WITH table_ownership AS ( SELECT @@ -1219,8 +1271,8 @@ func (pg *PostgreSQL) listTablesMissingOwnerPermission() ([]string, error) { pg_get_userbyid(c.relowner) AS owner_name FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid - WHERE c.relkind IN ('r', 'p') -- 'r' indicates a table 'p' indicates a partitioned table - AND n.nspname IN (%s) + WHERE c.relkind IN ('r', 'p') -- 'r' indicates a table, 'p' indicates a partitioned table + AND (quote_ident(n.nspname) || '.' 
|| quote_ident(c.relname)) IN (%s) ) SELECT schema_name, @@ -1237,7 +1289,7 @@ func (pg *PostgreSQL) listTablesMissingOwnerPermission() ([]string, error) { ) THEN true ELSE false END AS has_ownership - FROM table_ownership;`, querySchemaList, pg.source.User, pg.source.User) + FROM table_ownership;`, queryTableList, pg.source.User, pg.source.User) rows, err := pg.db.Query(checkTableOwnerPermissionQuery) if err != nil { @@ -1325,54 +1377,55 @@ func (pg *PostgreSQL) checkReplicationPermission() (bool, error) { return hasPermission, nil } -// func (pg *PostgreSQL) listTablesMissingReplicaIdentityFull() ([]string, error) { -// trimmedSchemaList := pg.getTrimmedSchemaList() -// querySchemaList := "'" + strings.Join(trimmedSchemaList, "','") + "'" -// checkTableReplicaIdentityQuery := fmt.Sprintf(`SELECT -// n.nspname AS schema_name, -// c.relname AS table_name, -// c.relreplident AS replica_identity, -// CASE -// WHEN c.relreplident <> 'f' -// THEN '%s' -// ELSE '%s' -// END AS status -// FROM pg_class c -// JOIN pg_namespace n ON c.relnamespace = n.oid -// WHERE quote_ident(n.nspname) IN (%s) -// AND c.relkind IN ('r', 'p');`, MISSING, GRANTED, querySchemaList) -// rows, err := pg.db.Query(checkTableReplicaIdentityQuery) -// if err != nil { -// return nil, fmt.Errorf("error in querying(%q) source database for checking table replica identity: %w", checkTableReplicaIdentityQuery, err) -// } -// defer func() { -// closeErr := rows.Close() -// if closeErr != nil { -// log.Warnf("close rows for query %q: %v", checkTableReplicaIdentityQuery, closeErr) -// } -// }() - -// var missingTables []string -// var tableSchemaName, tableName, replicaIdentity, status string - -// for rows.Next() { -// err = rows.Scan(&tableSchemaName, &tableName, &replicaIdentity, &status) -// if err != nil { -// return nil, fmt.Errorf("error in scanning query rows for table names: %w", err) -// } -// if status == MISSING { -// // quote table name as it can be case sensitive -// missingTables = 
append(missingTables, fmt.Sprintf(`%s."%s"`, tableSchemaName, tableName)) -// } -// } - -// // Check for errors during row iteration -// if err = rows.Err(); err != nil { -// return nil, fmt.Errorf("error iterating over query rows: %w", err) -// } - -// return missingTables, nil -// } +func (pg *PostgreSQL) listTablesMissingReplicaIdentityFull(queryTableList string) ([]string, error) { + checkTableReplicaIdentityQuery := fmt.Sprintf(` + SELECT + n.nspname AS schema_name, + c.relname AS table_name, + c.relreplident AS replica_identity, + CASE + WHEN c.relreplident <> 'f' + THEN '%s' + ELSE '%s' + END AS status + FROM pg_class c + JOIN pg_namespace n ON c.relnamespace = n.oid + WHERE (quote_ident(n.nspname) || '.' || quote_ident(c.relname)) IN (%s) + AND c.relkind IN ('r', 'p'); + `, MISSING, GRANTED, queryTableList) + + rows, err := pg.db.Query(checkTableReplicaIdentityQuery) + if err != nil { + return nil, fmt.Errorf("error in querying(%q) source database for checking table replica identity: %w", checkTableReplicaIdentityQuery, err) + } + defer func() { + closeErr := rows.Close() + if closeErr != nil { + log.Warnf("close rows for query %q: %v", checkTableReplicaIdentityQuery, closeErr) + } + }() + + var missingTables []string + var tableSchemaName, tableName, replicaIdentity, status string + + for rows.Next() { + err = rows.Scan(&tableSchemaName, &tableName, &replicaIdentity, &status) + if err != nil { + return nil, fmt.Errorf("error in scanning query rows for table names: %w", err) + } + if status == MISSING { + // quote table name as it can be case sensitive + missingTables = append(missingTables, fmt.Sprintf(`%s."%s"`, tableSchemaName, tableName)) + } + } + + // Check for errors during row iteration + if err = rows.Err(); err != nil { + return nil, fmt.Errorf("error iterating over query rows: %w", err) + } + + return missingTables, nil +} func (pg *PostgreSQL) checkWalLevel() (msg string) { query := `SELECT current_setting('wal_level') AS wal_level;` @@ -1380,7 
+1433,7 @@ func (pg *PostgreSQL) checkWalLevel() (msg string) { var walLevel string err := pg.db.QueryRow(query).Scan(&walLevel) if err != nil { - utils.ErrExit("error in querying(%q) source database for wal_level: %v\n", query, err) + utils.ErrExit("error in querying source database for wal_level: %q %v\n", query, err) } if walLevel != "logical" { msg = fmt.Sprintf("\n%s Current wal_level: %s; Required wal_level: logical", color.RedString("ERROR"), walLevel) @@ -1460,43 +1513,61 @@ func (pg *PostgreSQL) listSequencesMissingSelectPermission() (sequencesWithMissi return sequencesWithMissingPerm, nil } -func (pg *PostgreSQL) listTablesMissingSelectPermission() (tablesWithMissingPerm []string, err error) { +func (pg *PostgreSQL) listTablesMissingSelectPermission(queryTableList string) (tablesWithMissingPerm []string, err error) { // Users only need SELECT permissions on the tables of the schema they want to export for export schema - trimmedSchemaList := pg.getTrimmedSchemaList() - trimmedSchemaList = append(trimmedSchemaList, "pg_catalog", "information_schema") - querySchemaList := "'" + strings.Join(trimmedSchemaList, "','") + "'" + checkTableSelectPermissionQuery := "" + if queryTableList == "" { + + trimmedSchemaList := pg.getTrimmedSchemaList() + trimmedSchemaList = append(trimmedSchemaList, "pg_catalog", "information_schema") + querySchemaList := "'" + strings.Join(trimmedSchemaList, "','") + "'" + + checkTableSelectPermissionQuery = fmt.Sprintf(` + WITH schema_list AS ( + SELECT unnest(ARRAY[%s]) AS schema_name + ), + accessible_schemas AS ( + SELECT schema_name + FROM schema_list + WHERE has_schema_privilege('%s', quote_ident(schema_name), 'USAGE') + ) + SELECT + t.schemaname AS schema_name, + t.tablename AS table_name, + CASE + WHEN has_table_privilege('%s', quote_ident(t.schemaname) || '.' 
|| quote_ident(t.tablename), 'SELECT') + THEN '%s' + ELSE '%s' + END AS status + FROM pg_tables t + JOIN accessible_schemas a ON t.schemaname = a.schema_name + UNION ALL + SELECT + t.schemaname AS schema_name, + t.tablename AS table_name, + '%s' AS status + FROM pg_tables t + WHERE t.schemaname IN (SELECT schema_name FROM schema_list) + AND NOT EXISTS ( + SELECT 1 + FROM accessible_schemas a + WHERE t.schemaname = a.schema_name + );`, querySchemaList, pg.source.User, pg.source.User, GRANTED, MISSING, NO_USAGE_PERMISSION) + } else { + checkTableSelectPermissionQuery = fmt.Sprintf(` + SELECT + t.schemaname AS schema_name, + t.tablename AS table_name, + CASE + WHEN has_table_privilege('%s', quote_ident(t.schemaname) || '.' || quote_ident(t.tablename), 'SELECT') + THEN '%s' + ELSE '%s' + END AS status + FROM pg_tables t + WHERE quote_ident(t.schemaname) || '.' || quote_ident(t.tablename) IN (%s); + `, pg.source.User, GRANTED, MISSING, queryTableList) + } - checkTableSelectPermissionQuery := fmt.Sprintf(` - WITH schema_list AS ( - SELECT unnest(ARRAY[%s]) AS schema_name - ), - accessible_schemas AS ( - SELECT schema_name - FROM schema_list - WHERE has_schema_privilege('%s', quote_ident(schema_name), 'USAGE') - ) - SELECT - t.schemaname AS schema_name, - t.tablename AS table_name, - CASE - WHEN has_table_privilege('%s', quote_ident(t.schemaname) || '.' 
|| quote_ident(t.tablename), 'SELECT') - THEN '%s' - ELSE '%s' - END AS status - FROM pg_tables t - JOIN accessible_schemas a ON t.schemaname = a.schema_name - UNION ALL - SELECT - t.schemaname AS schema_name, - t.tablename AS table_name, - '%s' AS status - FROM pg_tables t - WHERE t.schemaname IN (SELECT schema_name FROM schema_list) - AND NOT EXISTS ( - SELECT 1 - FROM accessible_schemas a - WHERE t.schemaname = a.schema_name - );`, querySchemaList, pg.source.User, pg.source.User, GRANTED, MISSING, NO_USAGE_PERMISSION) rows, err := pg.db.Query(checkTableSelectPermissionQuery) if err != nil { return nil, fmt.Errorf("error in querying(%q) source database for checking table select permission: %w", checkTableSelectPermissionQuery, err) @@ -1536,7 +1607,7 @@ func (pg *PostgreSQL) listTablesMissingSelectPermission() (tablesWithMissingPerm return tablesWithMissingPerm, nil } -func (pg *PostgreSQL) listSchemasMissingUsagePermission() ([]string, error) { +func (pg *PostgreSQL) GetSchemasMissingUsagePermissions() ([]string, error) { // Users need usage permissions on the schemas they want to export and the pg_catalog and information_schema schemas trimmedSchemaList := pg.getTrimmedSchemaList() trimmedSchemaList = append(trimmedSchemaList, "pg_catalog", "information_schema") diff --git a/yb-voyager/src/srcdb/postgres_test.go b/yb-voyager/src/srcdb/postgres_test.go new file mode 100644 index 0000000000..41ac55d5ac --- /dev/null +++ b/yb-voyager/src/srcdb/postgres_test.go @@ -0,0 +1,136 @@ +//go:build integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package srcdb + +import ( + "testing" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" + "gotest.tools/assert" +) + +func TestPostgresGetAllTableNames(t *testing.T) { + testPostgresSource.TestContainer.ExecuteSqls( + `CREATE SCHEMA test_schema;`, + `CREATE TABLE test_schema.foo ( + id INT PRIMARY KEY, + name VARCHAR + );`, + `INSERT into test_schema.foo values (1, 'abc'), (2, 'xyz');`, + `CREATE TABLE test_schema.bar ( + id INT PRIMARY KEY, + name VARCHAR + );`, + `INSERT into test_schema.bar values (1, 'abc'), (2, 'xyz');`, + `CREATE TABLE test_schema.non_pk1( + id INT, + name VARCHAR(255) + );`) + defer testPostgresSource.TestContainer.ExecuteSqls(`DROP SCHEMA test_schema CASCADE;`) + + sqlname.SourceDBType = "postgresql" + testPostgresSource.Source.Schema = "test_schema" // used in query of GetAllTableNames() + + // Test GetAllTableNames + actualTables := testPostgresSource.DB().GetAllTableNames() + expectedTables := []*sqlname.SourceName{ + sqlname.NewSourceName("test_schema", "foo"), + sqlname.NewSourceName("test_schema", "bar"), + sqlname.NewSourceName("test_schema", "non_pk1"), + } + assert.Equal(t, len(expectedTables), len(actualTables), "Expected number of tables to match") + testutils.AssertEqualSourceNameSlices(t, expectedTables, actualTables) +} + +func TestPostgresGetTableToUniqueKeyColumnsMap(t *testing.T) { + testPostgresSource.TestContainer.ExecuteSqls( + `CREATE SCHEMA test_schema;`, + `CREATE TABLE test_schema.unique_table ( + id SERIAL PRIMARY KEY, + email VARCHAR(255) UNIQUE, + phone VARCHAR(20) UNIQUE, + address VARCHAR(255) UNIQUE + );`, + `INSERT INTO test_schema.unique_table (email, phone, address) VALUES + ('john@example.com', '1234567890', '123 Elm Street'), + ('jane@example.com', '0987654321', '456 Oak Avenue');`, + `CREATE TABLE 
test_schema.another_unique_table ( + user_id SERIAL PRIMARY KEY, + username VARCHAR(50) UNIQUE, + age INT + );`, + `CREATE UNIQUE INDEX idx_age ON test_schema.another_unique_table(age);`, + `INSERT INTO test_schema.another_unique_table (username, age) VALUES + ('user1', 30), + ('user2', 40);`) + defer testPostgresSource.TestContainer.ExecuteSqls(`DROP SCHEMA test_schema CASCADE;`) + + uniqueTablesList := []sqlname.NameTuple{ + {CurrentName: sqlname.NewObjectName("postgresql", "test_schema", "test_schema", "unique_table")}, + {CurrentName: sqlname.NewObjectName("postgresql", "test_schema", "test_schema", "another_unique_table")}, + } + + actualUniqKeys, err := testPostgresSource.DB().GetTableToUniqueKeyColumnsMap(uniqueTablesList) + if err != nil { + t.Fatalf("Error retrieving unique keys: %v", err) + } + + expectedUniqKeys := map[string][]string{ + "test_schema.unique_table": {"email", "phone", "address"}, + "test_schema.another_unique_table": {"username", "age"}, + } + + // Compare the maps by iterating over each table and asserting the columns list + for table, expectedColumns := range expectedUniqKeys { + actualColumns, exists := actualUniqKeys[table] + if !exists { + t.Errorf("Expected table %s not found in uniqueKeys", table) + } + + testutils.AssertEqualStringSlices(t, expectedColumns, actualColumns) + } +} + +func TestPostgresGetNonPKTables(t *testing.T) { + testPostgresSource.TestContainer.ExecuteSqls( + `CREATE SCHEMA test_schema;`, + `CREATE TABLE test_schema.table1 ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) + );`, + `CREATE TABLE test_schema.table2 ( + id SERIAL PRIMARY KEY, + email VARCHAR(100) + );`, + `CREATE TABLE test_schema.non_pk1( + id INT, + name VARCHAR(255) + );`, + `CREATE TABLE test_schema.non_pk2( + id INT, + name VARCHAR(255) + );`) + defer testPostgresSource.TestContainer.ExecuteSqls(`DROP SCHEMA test_schema CASCADE;`) + + actualTables, err := testPostgresSource.DB().GetNonPKTables() + assert.NilError(t, err, "Expected nil but non nil 
error: %v", err) + + expectedTables := []string{`test_schema."non_pk2"`, `test_schema."non_pk1"`} // func returns table.Qualified.Quoted + testutils.AssertEqualStringSlices(t, expectedTables, actualTables) +} diff --git a/yb-voyager/src/srcdb/source.go b/yb-voyager/src/srcdb/source.go index 945f2fbf77..605536c9fb 100644 --- a/yb-voyager/src/srcdb/source.go +++ b/yb-voyager/src/srcdb/source.go @@ -83,6 +83,10 @@ func (s *Source) GetOracleHome() string { } } +func (s *Source) GetSchemaList() []string { + return strings.Split(s.Schema, "|") +} + func (s *Source) IsOracleCDBSetup() bool { return (s.CDBName != "" || s.CDBTNSAlias != "" || s.CDBSid != "") } diff --git a/yb-voyager/src/srcdb/srcdb.go b/yb-voyager/src/srcdb/srcdb.go index 6ff1934e7d..aac963ff6e 100644 --- a/yb-voyager/src/srcdb/srcdb.go +++ b/yb-voyager/src/srcdb/srcdb.go @@ -31,9 +31,8 @@ type SourceDB interface { Disconnect() CheckSchemaExists() bool GetConnectionUriWithoutPassword() string - GetTableRowCount(tableName sqlname.NameTuple) int64 + GetTableRowCount(tableName sqlname.NameTuple) (int64, error) GetTableApproxRowCount(tableName sqlname.NameTuple) int64 - CheckRequiredToolsAreInstalled() GetVersion() string GetAllTableNames() []*sqlname.SourceName GetAllTableNamesRaw(schemaName string) ([]string, error) @@ -54,13 +53,13 @@ type SourceDB interface { GetTableToUniqueKeyColumnsMap(tableList []sqlname.NameTuple) (map[string][]string, error) ClearMigrationState(migrationUUID uuid.UUID, exportDir string) error GetNonPKTables() ([]string, error) - ValidateTablesReadyForLiveMigration(tableList []sqlname.NameTuple) error GetDatabaseSize() (int64, error) CheckSourceDBVersion(exportType string) error - GetMissingExportSchemaPermissions() ([]string, error) - GetMissingExportDataPermissions(exportType string) ([]string, error) - GetMissingAssessMigrationPermissions() ([]string, error) + GetMissingExportSchemaPermissions(queryTableList string) ([]string, error) + GetMissingExportDataPermissions(exportType 
string, finalTableList []sqlname.NameTuple) ([]string, error) + GetMissingAssessMigrationPermissions() ([]string, bool, error) CheckIfReplicationSlotsAreAvailable() (isAvailable bool, usedCount int, maxCount int, err error) + GetSchemasMissingUsagePermissions() ([]string, error) } func newSourceDB(source *Source) SourceDB { @@ -85,7 +84,7 @@ func IsTableEmpty(db *sql.DB, query string) bool { return true } if err != nil { - utils.ErrExit("Failed to query %q to check table is empty: %s", query, err) + utils.ErrExit("Failed to query: %q to check table is empty: %s", query, err) } return false } diff --git a/yb-voyager/src/srcdb/utils.go b/yb-voyager/src/srcdb/utils.go index 30f03df904..12aaf374a3 100644 --- a/yb-voyager/src/srcdb/utils.go +++ b/yb-voyager/src/srcdb/utils.go @@ -17,25 +17,10 @@ limitations under the License. import ( "fmt" "os" - "os/exec" "path" "strings" - - log "github.com/sirupsen/logrus" - - "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" ) -func checkTools(tools ...string) { - for _, tool := range tools { - execPath, err := exec.LookPath(tool) - if err != nil { - utils.ErrExit("%q not found. 
Check if it is installed and included in the path.", tool) - } - log.Infof("Found %q", execPath) - } -} - func findAllExecutablesInPath(executableName string) ([]string, error) { pathString := os.Getenv("PATH") if pathString == "" { diff --git a/yb-voyager/src/srcdb/yugabytedb.go b/yb-voyager/src/srcdb/yugabytedb.go index fc2a6c7a71..96daa1307b 100644 --- a/yb-voyager/src/srcdb/yugabytedb.go +++ b/yb-voyager/src/srcdb/yugabytedb.go @@ -52,7 +52,7 @@ func newYugabyteDB(s *Source) *YugabyteDB { func (yb *YugabyteDB) Connect() error { db, err := sql.Open("pgx", yb.getConnectionUri()) - db.SetMaxOpenConns(1) + db.SetMaxOpenConns(yb.source.NumConnections) db.SetConnMaxIdleTime(5 * time.Minute) yb.db = db return err @@ -70,20 +70,16 @@ func (yb *YugabyteDB) Disconnect() { } } -func (yb *YugabyteDB) CheckRequiredToolsAreInstalled() { - checkTools("strings") -} - -func (yb *YugabyteDB) GetTableRowCount(tableName sqlname.NameTuple) int64 { +func (yb *YugabyteDB) GetTableRowCount(tableName sqlname.NameTuple) (int64, error) { var rowCount int64 query := fmt.Sprintf("select count(*) from %s", tableName.ForUserQuery()) log.Infof("Querying row count of table %q", tableName) err := yb.db.QueryRow(query).Scan(&rowCount) if err != nil { - utils.ErrExit("Failed to query %q for row count of %q: %s", query, tableName, err) + return 0, fmt.Errorf("query %q for row count of %q: %w", query, tableName, err) } log.Infof("Table %q has %v rows.", tableName, rowCount) - return rowCount + return rowCount, nil } func (yb *YugabyteDB) GetTableApproxRowCount(tableName sqlname.NameTuple) int64 { @@ -94,7 +90,7 @@ func (yb *YugabyteDB) GetTableApproxRowCount(tableName sqlname.NameTuple) int64 log.Infof("Querying '%s' approx row count of table %q", query, tableName.String()) err := yb.db.QueryRow(query).Scan(&approxRowCount) if err != nil { - utils.ErrExit("Failed to query %q for approx row count of %q: %s", query, tableName.String(), err) + utils.ErrExit("Failed to query: %q for approx row count of 
%q: %s", query, tableName.String(), err) } log.Infof("Table %q has approx %v rows.", tableName.String(), approxRowCount) @@ -110,7 +106,7 @@ func (yb *YugabyteDB) GetVersion() string { query := "SELECT setting from pg_settings where name = 'server_version'" err := yb.db.QueryRow(query).Scan(&version) if err != nil { - utils.ErrExit("run query %q on source: %s", query, err) + utils.ErrExit("run query: %q on source: %s", query, err) } yb.source.DBVersion = version return version @@ -135,7 +131,7 @@ func (yb *YugabyteDB) checkSchemasExists() []string { FROM information_schema.schemata where schema_name IN (%s);`, querySchemaList) rows, err := yb.db.Query(chkSchemaExistsQuery) if err != nil { - utils.ErrExit("error in querying(%q) source database for checking mentioned schema(s) present or not: %v\n", chkSchemaExistsQuery, err) + utils.ErrExit("error in querying source database for checking mentioned schema(s) present or not: %q: %v\n", chkSchemaExistsQuery, err) } var listOfSchemaPresent []string var tableSchemaName string @@ -156,16 +152,25 @@ func (yb *YugabyteDB) checkSchemasExists() []string { schemaNotPresent := utils.SetDifference(trimmedList, listOfSchemaPresent) if len(schemaNotPresent) > 0 { - utils.ErrExit("Following schemas are not present in source database %v, please provide a valid schema list.\n", schemaNotPresent) + utils.ErrExit("Following schemas are not present in source database: %v, please provide a valid schema list.\n", schemaNotPresent) } return trimmedList } func (yb *YugabyteDB) GetAllTableNamesRaw(schemaName string) ([]string, error) { - query := fmt.Sprintf(`SELECT table_name - FROM information_schema.tables - WHERE table_type = 'BASE TABLE' AND - table_schema = '%s';`, schemaName) + // Information schema requires select permission on the tables to query the tables. However, pg_catalog does not require any permission. + // So, we are using pg_catalog to get the table names. 
+ query := fmt.Sprintf(` + SELECT + c.relname AS table_name + FROM + pg_catalog.pg_class c + JOIN + pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE + c.relkind IN ('r', 'p') -- 'r' for regular tables, 'p' for partitioned tables + AND n.nspname = '%s'; + `, schemaName) rows, err := yb.db.Query(query) if err != nil { @@ -195,14 +200,24 @@ func (yb *YugabyteDB) GetAllTableNamesRaw(schemaName string) ([]string, error) { func (yb *YugabyteDB) GetAllTableNames() []*sqlname.SourceName { schemaList := yb.checkSchemasExists() querySchemaList := "'" + strings.Join(schemaList, "','") + "'" - query := fmt.Sprintf(`SELECT table_schema, table_name - FROM information_schema.tables - WHERE table_type = 'BASE TABLE' AND - table_schema IN (%s);`, querySchemaList) + // Information schema requires select permission on the tables to query the tables. However, pg_catalog does not require any permission. + // So, we are using pg_catalog to get the table names. + query := fmt.Sprintf(` + SELECT + n.nspname AS table_schema, + c.relname AS table_name + FROM + pg_catalog.pg_class c + JOIN + pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE + c.relkind IN ('r', 'p') -- 'r' for regular tables, 'p' for partitioned tables + AND n.nspname IN (%s); + `, querySchemaList) rows, err := yb.db.Query(query) if err != nil { - utils.ErrExit("error in querying(%q) YB database for table names: %v\n", query, err) + utils.ErrExit("error in querying YB database for table names: %q: %v\n", query, err) } defer func() { closeErr := rows.Close() @@ -264,10 +279,6 @@ func (yb *YugabyteDB) ExportSchema(exportDir string, schemaDir string) { panic("not implemented") } -func (yb *YugabyteDB) ValidateTablesReadyForLiveMigration(tableList []sqlname.NameTuple) error { - panic("not implemented") -} - func (yb *YugabyteDB) GetIndexesInfo() []utils.IndexInfo { return nil } @@ -336,7 +347,7 @@ func (yb *YugabyteDB) GetAllSequences() []string { query := fmt.Sprintf(`SELECT sequence_name FROM 
information_schema.sequences where sequence_schema IN (%s);`, querySchemaList) rows, err := yb.db.Query(query) if err != nil { - utils.ErrExit("error in querying(%q) source database for sequence names: %v\n", query, err) + utils.ErrExit("error in querying source database for sequence names: %q: %v\n", query, err) } defer func() { closeErr := rows.Close() @@ -423,7 +434,7 @@ func (yb *YugabyteDB) getAllUserDefinedTypesInSchema(schemaName string) []string );`, schemaName, schemaName, schemaName) rows, err := yb.db.Query(query) if err != nil { - utils.ErrExit("error in querying(%q) source database for enum types: %v\n", query, err) + utils.ErrExit("error in querying source database for enum types: %q: %v\n", query, err) } defer func() { closeErr := rows.Close() @@ -450,7 +461,7 @@ func (yb *YugabyteDB) getTypesOfAllArraysInATable(schemaName, tableName string) AND data_type = 'ARRAY';`, schemaName, tableName) rows, err := yb.db.Query(query) if err != nil { - utils.ErrExit("error in querying(%q) source database for array types: %v\n", query, err) + utils.ErrExit("error in querying source database for array types: %q: %v\n", query, err) } defer func() { closeErr := rows.Close() @@ -530,7 +541,7 @@ func (yb *YugabyteDB) FilterEmptyTables(tableList []sqlname.NameTuple) ([]sqlnam if err == sql.ErrNoRows { empty = true } else { - utils.ErrExit("error in querying table %v: %v", tableName, err) + utils.ErrExit("error in querying table: %v: %v", tableName, err) } } if !empty { @@ -591,7 +602,7 @@ func (yb *YugabyteDB) filterUnsupportedUserDefinedDatatypes(tableName sqlname.Na a.attnum;`, tname, sname) rows, err := yb.db.Query(query) if err != nil { - utils.ErrExit("error in querying(%q) source database for user defined columns: %v\n", query, err) + utils.ErrExit("error in querying source database for user defined columns: %q: %v\n", query, err) } defer func() { closeErr := rows.Close() @@ -659,7 +670,7 @@ func (yb *YugabyteDB) ParentTableOfPartition(table sqlname.NameTuple) 
string { err := yb.db.QueryRow(query).Scan(&parentTable) if err != sql.ErrNoRows && err != nil { - utils.ErrExit("Error in query=%s for parent tablename of table=%s: %v", query, table, err) + utils.ErrExit("Error in query for parent tablename of table: %q: %s: %v", query, table, err) } return parentTable @@ -677,7 +688,7 @@ func (yb *YugabyteDB) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[ rows, err := yb.db.Query(query) if err != nil { log.Infof("Query to find column to sequence mapping: %s", query) - utils.ErrExit("Error in querying for sequences in table=%s: %v", table, err) + utils.ErrExit("Error in querying for sequences in table: %s: %v", table, err) } defer func() { closeErr := rows.Close() @@ -688,7 +699,7 @@ func (yb *YugabyteDB) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[ for rows.Next() { err := rows.Scan(&columeName, &sequenceName, &schemaName) if err != nil { - utils.ErrExit("Error in scanning for sequences in table=%s: %v", table, err) + utils.ErrExit("Error in scanning for sequences in table: %s: %v", table, err) } qualifiedColumnName := fmt.Sprintf("%s.%s", table.AsQualifiedCatalogName(), columeName) // quoting sequence name as it can be case sensitive - required during import data restore sequences @@ -696,7 +707,7 @@ func (yb *YugabyteDB) GetColumnToSequenceMap(tableList []sqlname.NameTuple) map[ } err = rows.Close() if err != nil { - utils.ErrExit("close rows for table %s query %q: %s", table.String(), query, err) + utils.ErrExit("close rows for table: %s query: %q: %s", table.String(), query, err) } } @@ -709,7 +720,7 @@ func (yb *YugabyteDB) GetServers() []string { YB_SERVERS_QUERY := "SELECT host FROM yb_servers()" rows, err := yb.db.Query(YB_SERVERS_QUERY) if err != nil { - utils.ErrExit("error in querying(%q) source database for yb_servers: %v\n", YB_SERVERS_QUERY, err) + utils.ErrExit("error in querying source database for yb_servers: %q: %v\n", YB_SERVERS_QUERY, err) } defer func() { closeErr := rows.Close() @@ 
-745,7 +756,7 @@ WHERE parent.relname='%s' AND nmsp_parent.nspname = '%s' `, tname, sname) rows, err := yb.db.Query(query) if err != nil { log.Errorf("failed to list partitions of table %s: query = [ %s ], error = %s", tableName, query, err) - utils.ErrExit("failed to find the partitions for table %s:", tableName, err) + utils.ErrExit("failed to find the partitions for table: %s: %v", tableName, err) } defer func() { closeErr := rows.Close() @@ -757,12 +768,12 @@ WHERE parent.relname='%s' AND nmsp_parent.nspname = '%s' `, tname, sname) var childSchema, childTable string err := rows.Scan(&childSchema, &childTable) if err != nil { - utils.ErrExit("Error in scanning for child partitions of table=%s: %v", tableName, err) + utils.ErrExit("Error in scanning for child partitions of table: %s: %v", tableName, err) } partitions = append(partitions, fmt.Sprintf(`%s.%s`, childSchema, childTable)) } if rows.Err() != nil { - utils.ErrExit("Error in scanning for child partitions of table=%s: %v", tableName, rows.Err()) + utils.ErrExit("Error in scanning for child partitions of table: %s: %v", tableName, rows.Err()) } return partitions } @@ -876,7 +887,7 @@ func (yb *YugabyteDB) GetNonPKTables() ([]string, error) { query := fmt.Sprintf(PG_QUERY_TO_CHECK_IF_TABLE_HAS_PK, querySchemaList) rows, err := yb.db.Query(query) if err != nil { - utils.ErrExit("error in querying(%q) source database for primary key: %v\n", query, err) + utils.ErrExit("error in querying source database for primary key: %q: %v\n", query, err) } defer func() { closeErr := rows.Close() @@ -1033,18 +1044,22 @@ func (yb *YugabyteDB) CheckSourceDBVersion(exportType string) error { return nil } -func (yb *YugabyteDB) GetMissingExportSchemaPermissions() ([]string, error) { +func (yb *YugabyteDB) GetMissingExportSchemaPermissions(queryTableList string) ([]string, error) { return nil, nil } -func (yb *YugabyteDB) GetMissingExportDataPermissions(exportType string) ([]string, error) { +func (yb *YugabyteDB) 
GetMissingExportDataPermissions(exportType string, finalTableList []sqlname.NameTuple) ([]string, error) { return nil, nil } -func (yb *YugabyteDB) GetMissingAssessMigrationPermissions() ([]string, error) { - return nil, nil +func (yb *YugabyteDB) GetMissingAssessMigrationPermissions() ([]string, bool, error) { + return nil, false, nil } func (yb *YugabyteDB) CheckIfReplicationSlotsAreAvailable() (isAvailable bool, usedCount int, maxCount int, err error) { return checkReplicationSlotsForPGAndYB(yb.db) } + +func (yb *YugabyteDB) GetSchemasMissingUsagePermissions() ([]string, error) { + return nil, nil +} diff --git a/yb-voyager/src/srcdb/yugbaytedb_test.go b/yb-voyager/src/srcdb/yugbaytedb_test.go new file mode 100644 index 0000000000..4a7cf8e6f9 --- /dev/null +++ b/yb-voyager/src/srcdb/yugbaytedb_test.go @@ -0,0 +1,136 @@ +//go:build integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package srcdb + +import ( + "testing" + + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" + "gotest.tools/assert" +) + +func TestYugabyteGetAllTableNames(t *testing.T) { + testYugabyteDBSource.TestContainer.ExecuteSqls( + `CREATE SCHEMA test_schema;`, + `CREATE TABLE test_schema.foo ( + id INT PRIMARY KEY, + name VARCHAR + );`, + `INSERT into test_schema.foo values (1, 'abc'), (2, 'xyz');`, + `CREATE TABLE test_schema.bar ( + id INT PRIMARY KEY, + name VARCHAR + );`, + `INSERT into test_schema.bar values (1, 'abc'), (2, 'xyz');`, + `CREATE TABLE test_schema.non_pk1( + id INT, + name VARCHAR(255) + );`) + defer testYugabyteDBSource.TestContainer.ExecuteSqls(`DROP SCHEMA test_schema CASCADE;`) + + sqlname.SourceDBType = "postgresql" + testYugabyteDBSource.Source.Schema = "test_schema" + + // Test GetAllTableNames + actualTables := testYugabyteDBSource.DB().GetAllTableNames() + expectedTables := []*sqlname.SourceName{ + sqlname.NewSourceName("test_schema", "foo"), + sqlname.NewSourceName("test_schema", "bar"), + sqlname.NewSourceName("test_schema", "non_pk1"), + } + assert.Equal(t, len(expectedTables), len(actualTables), "Expected number of tables to match") + testutils.AssertEqualSourceNameSlices(t, expectedTables, actualTables) +} + +func TestYugabyteGetTableToUniqueKeyColumnsMap(t *testing.T) { + testYugabyteDBSource.TestContainer.ExecuteSqls( + `CREATE SCHEMA test_schema;`, + `CREATE TABLE test_schema.unique_table ( + id SERIAL PRIMARY KEY, + email VARCHAR(255) UNIQUE, + phone VARCHAR(20) UNIQUE, + address VARCHAR(255) UNIQUE + );`, + `INSERT INTO test_schema.unique_table (email, phone, address) VALUES + ('john@example.com', '1234567890', '123 Elm Street'), + ('jane@example.com', '0987654321', '456 Oak Avenue');`, + `CREATE TABLE test_schema.another_unique_table ( + user_id SERIAL PRIMARY KEY, + username VARCHAR(50) UNIQUE, + age INT + );`, + `CREATE UNIQUE INDEX idx_age ON 
test_schema.another_unique_table(age);`, + `INSERT INTO test_schema.another_unique_table (username, age) VALUES + ('user1', 30), + ('user2', 40);`) + defer testYugabyteDBSource.TestContainer.ExecuteSqls(`DROP SCHEMA test_schema CASCADE;`) + + uniqueTablesList := []sqlname.NameTuple{ + {CurrentName: sqlname.NewObjectName("postgresql", "test_schema", "test_schema", "unique_table")}, + {CurrentName: sqlname.NewObjectName("postgresql", "test_schema", "test_schema", "another_unique_table")}, + } + + actualUniqKeys, err := testYugabyteDBSource.DB().GetTableToUniqueKeyColumnsMap(uniqueTablesList) + if err != nil { + t.Fatalf("Error retrieving unique keys: %v", err) + } + + expectedUniqKeys := map[string][]string{ + "test_schema.unique_table": {"email", "phone", "address"}, + "test_schema.another_unique_table": {"username", "age"}, + } + + // Compare the maps by iterating over each table and asserting the columns list + for table, expectedColumns := range expectedUniqKeys { + actualColumns, exists := actualUniqKeys[table] + if !exists { + t.Errorf("Expected table %s not found in uniqueKeys", table) + } + + testutils.AssertEqualStringSlices(t, expectedColumns, actualColumns) + } +} + +func TestYugabyteGetNonPKTables(t *testing.T) { + testYugabyteDBSource.TestContainer.ExecuteSqls( + `CREATE SCHEMA test_schema;`, + `CREATE TABLE test_schema.table1 ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) + );`, + `CREATE TABLE test_schema.table2 ( + id SERIAL PRIMARY KEY, + email VARCHAR(100) + );`, + `CREATE TABLE test_schema.non_pk1( + id INT, + name VARCHAR(255) + );`, + `CREATE TABLE test_schema.non_pk2( + id INT, + name VARCHAR(255) + );`) + defer testYugabyteDBSource.TestContainer.ExecuteSqls(`DROP SCHEMA test_schema CASCADE;`) + + actualTables, err := testYugabyteDBSource.DB().GetNonPKTables() + assert.NilError(t, err, "Expected nil but non nil error: %v", err) + + expectedTables := []string{`test_schema."non_pk2"`, `test_schema."non_pk1"`} // func returns table.Qualified.Quoted 
+ testutils.AssertEqualStringSlices(t, expectedTables, actualTables) +} diff --git a/yb-voyager/src/tgtdb/attr_name_registry_test.go b/yb-voyager/src/tgtdb/attr_name_registry_test.go index a2a4edbf6e..3655e477d0 100644 --- a/yb-voyager/src/tgtdb/attr_name_registry_test.go +++ b/yb-voyager/src/tgtdb/attr_name_registry_test.go @@ -1,14 +1,32 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ package tgtdb import ( "testing" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" ) func TestAttributeNameRegistry_QuoteAttributeName_POSTGRES(t *testing.T) { reg := NewAttributeNameRegistry(nil, &TargetConf{TargetDBType: POSTGRESQL}) - o := sqlname.NewObjectName(sqlname.POSTGRESQL, "public", "public", "test_table") + o := sqlname.NewObjectName(constants.POSTGRESQL, "public", "public", "test_table") tableNameTup := sqlname.NameTuple{SourceName: o, TargetName: o, CurrentName: o} // Mocking GetListOfTableAttributes to return a list of attributes @@ -81,7 +99,7 @@ func TestAttributeNameRegistry_QuoteAttributeName_POSTGRES(t *testing.T) { } func TestAttributeNameRegistry_QuoteAttributeName_ORACLE(t *testing.T) { reg := NewAttributeNameRegistry(nil, &TargetConf{TargetDBType: ORACLE}) - o := sqlname.NewObjectName(sqlname.ORACLE, "TEST", "TEST", "test_table") + o := sqlname.NewObjectName(constants.ORACLE, "TEST", "TEST", "test_table") tableNameTup := sqlname.NameTuple{SourceName: 
o, TargetName: o, CurrentName: o} // Mocking GetListOfTableAttributes to return a list of attributes diff --git a/yb-voyager/src/tgtdb/conn_pool_test.go b/yb-voyager/src/tgtdb/conn_pool_test.go index 59fd4dad44..55053950d1 100644 --- a/yb-voyager/src/tgtdb/conn_pool_test.go +++ b/yb-voyager/src/tgtdb/conn_pool_test.go @@ -1,3 +1,5 @@ +//go:build integration + /* Copyright (c) YugabyteDB, Inc. @@ -23,44 +25,18 @@ import ( "time" "github.com/jackc/pgx/v4" + log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - - embeddedpostgres "github.com/fergusstrange/embedded-postgres" ) -// var postgres *embeddedpostgres.EmbeddedPostgres - -func setupPostgres(t *testing.T) *embeddedpostgres.EmbeddedPostgres { - postgres := embeddedpostgres.NewDatabase(embeddedpostgres.DefaultConfig(). - Username("postgres"). - Password("postgres"). - Database("test"). - Port(9876). - StartTimeout(30 * time.Second)) - err := postgres.Start() - if err != nil { - t.Fatal(err) - } - return postgres -} - -func shutdownPostgres(postgres *embeddedpostgres.EmbeddedPostgres, t *testing.T) { - err := postgres.Stop() - if err != nil { - t.Fatal(err) - } -} - func TestBasic(t *testing.T) { - postgres := setupPostgres(t) - defer shutdownPostgres(postgres, t) // GIVEN: a conn pool of size 10. size := 10 connParams := &ConnectionParams{ NumConnections: size, NumMaxConnections: size, - ConnUriList: []string{fmt.Sprintf("postgresql://postgres:postgres@localhost:%d/test", 9876)}, + ConnUriList: []string{testYugabyteDBTarget.GetConnectionString()}, SessionInitScript: []string{}, } pool := NewConnectionPool(connParams) @@ -88,8 +64,6 @@ func dummyProcess(pool *ConnectionPool, milliseconds int, wg *sync.WaitGroup) { } func TestIncreaseConnectionsUptoMax(t *testing.T) { - postgres := setupPostgres(t) - defer shutdownPostgres(postgres, t) // GIVEN: a conn pool of size 10, with max 20 connections. 
size := 10 maxSize := 20 @@ -97,7 +71,7 @@ func TestIncreaseConnectionsUptoMax(t *testing.T) { connParams := &ConnectionParams{ NumConnections: size, NumMaxConnections: maxSize, - ConnUriList: []string{fmt.Sprintf("postgresql://postgres:postgres@localhost:%d/test", 9876)}, + ConnUriList: []string{testYugabyteDBTarget.GetConnectionString()}, SessionInitScript: []string{}, } pool := NewConnectionPool(connParams) @@ -105,7 +79,7 @@ func TestIncreaseConnectionsUptoMax(t *testing.T) { // WHEN: multiple goroutines acquire connection, perform some operation // and release connection back to pool - // WHEN: we keep increasing the connnections upto the max.. + // WHEN: we keep increasing the connections upto the max.. var wg sync.WaitGroup wg.Add(1) @@ -133,8 +107,6 @@ func TestIncreaseConnectionsUptoMax(t *testing.T) { } func TestDecreaseConnectionsUptoMin(t *testing.T) { - postgres := setupPostgres(t) - defer shutdownPostgres(postgres, t) // GIVEN: a conn pool of size 10, with max 20 connections. size := 10 maxSize := 20 @@ -142,7 +114,7 @@ func TestDecreaseConnectionsUptoMin(t *testing.T) { connParams := &ConnectionParams{ NumConnections: size, NumMaxConnections: maxSize, - ConnUriList: []string{fmt.Sprintf("postgresql://postgres:postgres@localhost:%d/test", 9876)}, + ConnUriList: []string{testYugabyteDBTarget.GetConnectionString()}, SessionInitScript: []string{}, } pool := NewConnectionPool(connParams) @@ -178,8 +150,6 @@ func TestDecreaseConnectionsUptoMin(t *testing.T) { } func TestUpdateConnectionsRandom(t *testing.T) { - postgres := setupPostgres(t) - defer shutdownPostgres(postgres, t) // GIVEN: a conn pool of size 10, with max 20 connections. 
size := 10 maxSize := 20 @@ -187,7 +157,7 @@ func TestUpdateConnectionsRandom(t *testing.T) { connParams := &ConnectionParams{ NumConnections: size, NumMaxConnections: maxSize, - ConnUriList: []string{fmt.Sprintf("postgresql://postgres:postgres@localhost:%d/test", 9876)}, + ConnUriList: []string{testYugabyteDBTarget.GetConnectionString()}, SessionInitScript: []string{}, } pool := NewConnectionPool(connParams) @@ -207,7 +177,7 @@ func TestUpdateConnectionsRandom(t *testing.T) { if pool.size+randomNumber < 1 || (pool.size+randomNumber > pool.params.NumMaxConnections) { continue } - fmt.Printf("i=%d, updating by %d. New pool size expected = %d\n", i, randomNumber, *expectedFinalSize+randomNumber) + log.Infof("i=%d, updating by %d. New pool size expected = %d\n", i, randomNumber, *expectedFinalSize+randomNumber) err := pool.UpdateNumConnections(randomNumber) assert.NoError(t, err) time.Sleep(10 * time.Millisecond) diff --git a/yb-voyager/src/tgtdb/main_test.go b/yb-voyager/src/tgtdb/main_test.go new file mode 100644 index 0000000000..ea95391485 --- /dev/null +++ b/yb-voyager/src/tgtdb/main_test.go @@ -0,0 +1,141 @@ +//go:build integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package tgtdb + +import ( + "context" + "os" + "testing" + + _ "github.com/godror/godror" + _ "github.com/jackc/pgx/v5/stdlib" + log "github.com/sirupsen/logrus" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" + testcontainers "github.com/yugabyte/yb-voyager/yb-voyager/test/containers" +) + +type TestDB struct { + testcontainers.TestContainer + TargetDB +} + +var ( + testPostgresTarget *TestDB + testOracleTarget *TestDB + testYugabyteDBTarget *TestDB +) + +func TestMain(m *testing.M) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + postgresContainer := testcontainers.NewTestContainer("postgresql", nil) + err := postgresContainer.Start(ctx) + if err != nil { + utils.ErrExit("Failed to start postgres container: %v", err) + } + host, port, err := postgresContainer.GetHostPort() + if err != nil { + utils.ErrExit("%v", err) + } + testPostgresTarget = &TestDB{ + TestContainer: postgresContainer, + TargetDB: NewTargetDB(&TargetConf{ + TargetDBType: "postgresql", + DBVersion: postgresContainer.GetConfig().DBVersion, + User: postgresContainer.GetConfig().User, + Password: postgresContainer.GetConfig().Password, + Schema: postgresContainer.GetConfig().Schema, + DBName: postgresContainer.GetConfig().DBName, + Host: host, + Port: port, + SSLMode: "disable", + }), + } + + err = testPostgresTarget.Init() + if err != nil { + utils.ErrExit("Failed to connect to postgres database: %w", err) + } + defer testPostgresTarget.Finalize() + + // oracleContainer := testcontainers.NewTestContainer("oracle", nil) + // _ = oracleContainer.Start(ctx) + // host, port, err = oracleContainer.GetHostPort() + // if err != nil { + // utils.ErrExit("%v", err) + // } + // testOracleTarget = &TestDB2{ + // Container: oracleContainer, + // TargetDB: NewTargetDB(&TargetConf{ + // TargetDBType: "oracle", + // DBVersion: oracleContainer.GetConfig().DBVersion, + // User: oracleContainer.GetConfig().User, + // Password: oracleContainer.GetConfig().Password, + // 
Schema: oracleContainer.GetConfig().Schema, + // DBName: oracleContainer.GetConfig().DBName, + // Host: host, + // Port: port, + // }), + // } + + // err = testOracleTarget.Init() + // if err != nil { + // utils.ErrExit("Failed to connect to oracle database: %w", err) + // } + // defer testOracleTarget.Finalize() + + yugabytedbContainer := testcontainers.NewTestContainer("yugabytedb", nil) + err = yugabytedbContainer.Start(ctx) + if err != nil { + utils.ErrExit("Failed to start yugabytedb container: %v", err) + } + host, port, err = yugabytedbContainer.GetHostPort() + if err != nil { + utils.ErrExit("%v", err) + } + testYugabyteDBTarget = &TestDB{ + TestContainer: yugabytedbContainer, + TargetDB: NewTargetDB(&TargetConf{ + TargetDBType: "yugabytedb", + DBVersion: yugabytedbContainer.GetConfig().DBVersion, + User: yugabytedbContainer.GetConfig().User, + Password: yugabytedbContainer.GetConfig().Password, + Schema: yugabytedbContainer.GetConfig().Schema, + DBName: yugabytedbContainer.GetConfig().DBName, + Host: host, + Port: port, + }), + } + + err = testYugabyteDBTarget.Init() + if err != nil { + utils.ErrExit("Failed to connect to yugabytedb database: %w", err) + } + defer testYugabyteDBTarget.Finalize() + + // to avoid info level logs flooding the test output + log.SetLevel(log.WarnLevel) + + exitCode := m.Run() + + // cleaning up all the running containers + testcontainers.TerminateAllContainers() + + os.Exit(exitCode) +} diff --git a/yb-voyager/src/tgtdb/oracle.go b/yb-voyager/src/tgtdb/oracle.go index fa13b62da1..6b69f6869a 100644 --- a/yb-voyager/src/tgtdb/oracle.go +++ b/yb-voyager/src/tgtdb/oracle.go @@ -29,9 +29,11 @@ import ( "time" "github.com/google/uuid" + "github.com/samber/lo" log "github.com/sirupsen/logrus" "github.com/yugabyte/yb-voyager/yb-voyager/src/callhome" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" "github.com/yugabyte/yb-voyager/yb-voyager/src/sqlldr" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" 
"github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" @@ -158,7 +160,7 @@ func (tdb *TargetOracleDB) GetVersion() string { // query sample output: Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production err := tdb.QueryRow(query).Scan(&version) if err != nil { - utils.ErrExit("run query %q on source: %s", query, err) + utils.ErrExit("run query: %q on source: %s", query, err) } return version } @@ -182,7 +184,7 @@ func (tdb *TargetOracleDB) GetNonEmptyTables(tables []sqlname.NameTuple) []sqlna stmt := fmt.Sprintf("SELECT COUNT(*) FROM %s", table.ForUserQuery()) err := tdb.QueryRow(stmt).Scan(&rowCount) if err != nil { - utils.ErrExit("run query %q on target: %s", stmt, err) + utils.ErrExit("run query: %q on target: %s", stmt, err) } if rowCount > 0 { result = append(result, table) @@ -192,6 +194,35 @@ func (tdb *TargetOracleDB) GetNonEmptyTables(tables []sqlname.NameTuple) []sqlna return result } +// There are some restrictions on TRUNCATE TABLE in oracle +// https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/TRUNCATE-TABLE.html#:~:text=cursors%20are%20invalidated.-,Restrictions%20on%20Truncating%20Tables,-This%20statement%20is +// Notable ones include: +// 1. You cannot truncate a table that has a foreign key constraint. This is not a problem because in our fall-forward workflow, +// we ask users to disable foreign key constraints before starting the migration. +// 2. You cannot truncate a table that is part of a cluster. This will fail. +// 3. You cannot truncate the parent table of a reference-partitioned table. This will fail. voyager does not support reference partitioned tables migration. +// +// Given that there are some cases where it might fail, we attempt to truncate all tables wherever possible, +// and accumulate all the errors and return them as a single error. 
+func (tdb *TargetOracleDB) TruncateTables(tables []sqlname.NameTuple) error { + tableNames := lo.Map(tables, func(nt sqlname.NameTuple, _ int) string { + return nt.ForUserQuery() + }) + var errors []error + + for _, tableName := range tableNames { + query := fmt.Sprintf("TRUNCATE TABLE %s", tableName) + _, err := tdb.Exec(query) + if err != nil { + errors = append(errors, fmt.Errorf("truncate table %q: %w", tableName, err)) + } + } + if len(errors) > 0 { + return fmt.Errorf("truncate tables: %v", errors) + } + return nil +} + func (tdb *TargetOracleDB) IsNonRetryableCopyError(err error) bool { return false } @@ -408,7 +439,7 @@ func (tdb *TargetOracleDB) setTargetSchema(conn *sql.Conn) { setSchemaQuery := fmt.Sprintf("ALTER SESSION SET CURRENT_SCHEMA = %s", tdb.tconf.Schema) _, err := conn.ExecContext(context.Background(), setSchemaQuery) if err != nil { - utils.ErrExit("run query %q on target %q to set schema: %s", setSchemaQuery, tdb.tconf.Host, err) + utils.ErrExit("run query: %q on target %q to set schema: %s", setSchemaQuery, tdb.tconf.Host, err) } } @@ -666,7 +697,7 @@ func (tdb *TargetOracleDB) isTableExists(nt sqlname.NameTuple) bool { func (tdb *TargetOracleDB) isQueryResultNonEmpty(query string) bool { rows, err := tdb.Query(query) if err != nil { - utils.ErrExit("error checking if query %s is empty: %v", query, err) + utils.ErrExit("error checking if query is empty: %q: %v", query, err) } defer rows.Close() @@ -687,7 +718,7 @@ func (tdb *TargetOracleDB) ClearMigrationState(migrationUUID uuid.UUID, exportDi tables := []sqlname.NameTuple{} for _, tableName := range tableNames { parts := strings.Split(tableName, ".") - objName := sqlname.NewObjectName(sqlname.ORACLE, "", parts[0], strings.ToUpper(parts[1])) + objName := sqlname.NewObjectName(constants.ORACLE, "", parts[0], strings.ToUpper(parts[1])) nt := sqlname.NameTuple{ CurrentName: objName, SourceName: objName, diff --git a/yb-voyager/src/tgtdb/postgres.go b/yb-voyager/src/tgtdb/postgres.go index 
f6c27223b2..53f3a78fee 100644 --- a/yb-voyager/src/tgtdb/postgres.go +++ b/yb-voyager/src/tgtdb/postgres.go @@ -29,10 +29,13 @@ import ( "github.com/google/uuid" "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" + pgconn5 "github.com/jackc/pgx/v5/pgconn" _ "github.com/jackc/pgx/v5/stdlib" + "github.com/samber/lo" log "github.com/sirupsen/logrus" "github.com/yugabyte/yb-voyager/yb-voyager/src/callhome" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" "github.com/yugabyte/yb-voyager/yb-voyager/src/namereg" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" @@ -75,6 +78,12 @@ func (pg *TargetPostgreSQL) Exec(query string) (int64, error) { res, err := pg.db.Exec(query) if err != nil { + var pgErr *pgconn5.PgError + if errors.As(err, &pgErr) { + if pgErr.Hint != "" || pgErr.Detail != "" { + return rowsAffected, fmt.Errorf("run query %q on target %q: %w \nHINT: %s\nDETAIL: %s", query, pg.tconf.Host, err, pgErr.Hint, pgErr.Detail) + } + } return rowsAffected, fmt.Errorf("run query %q on target %q: %w", query, pg.tconf.Host, err) } rowsAffected, err = res.RowsAffected() @@ -266,6 +275,9 @@ func (pg *TargetPostgreSQL) CreateVoyagerSchema() error { rows_imported BIGINT, PRIMARY KEY (migration_uuid, data_file_name, batch_number, schema_name, table_name) );`, BATCH_METADATA_TABLE_NAME), + fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN data_file_name TYPE TEXT;`, BATCH_METADATA_TABLE_NAME), + fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN schema_name TYPE TEXT;`, BATCH_METADATA_TABLE_NAME), + fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN table_name TYPE TEXT;`, BATCH_METADATA_TABLE_NAME), fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s ( migration_uuid uuid, channel_no INT, @@ -283,6 +295,7 @@ func (pg *TargetPostgreSQL) CreateVoyagerSchema() error { num_deletes BIGINT, num_updates BIGINT, PRIMARY KEY (migration_uuid, table_name, channel_no));`, EVENTS_PER_TABLE_METADATA_TABLE_NAME), + fmt.Sprintf(`ALTER TABLE %s ALTER 
COLUMN table_name TYPE TEXT;`, EVENTS_PER_TABLE_METADATA_TABLE_NAME), } maxAttempts := 12 @@ -323,7 +336,7 @@ func (pg *TargetPostgreSQL) GetNonEmptyTables(tables []sqlname.NameTuple) []sqln continue } if err != nil { - utils.ErrExit("failed to check whether table %q empty: %s", table, err) + utils.ErrExit("failed to check whether table is empty: %q: %s", table, err) } result = append(result, table) } @@ -331,6 +344,19 @@ func (pg *TargetPostgreSQL) GetNonEmptyTables(tables []sqlname.NameTuple) []sqln return result } +func (pg *TargetPostgreSQL) TruncateTables(tables []sqlname.NameTuple) error { + tableNames := lo.Map(tables, func(nt sqlname.NameTuple, _ int) string { + return nt.ForUserQuery() + }) + commaSeparatedTableNames := strings.Join(tableNames, ", ") + query := fmt.Sprintf("TRUNCATE TABLE %s", commaSeparatedTableNames) + _, err := pg.Exec(query) + if err != nil { + return fmt.Errorf("truncate tables with query %q: %w", query, err) + } + return nil +} + func (pg *TargetPostgreSQL) ImportBatch(batch Batch, args *ImportBatchArgs, exportDir string, tableSchema map[string]map[string]string) (int64, error) { var rowsAffected int64 var err error @@ -630,7 +656,7 @@ func (pg *TargetPostgreSQL) setTargetSchema(conn *pgx.Conn) { setSchemaQuery := fmt.Sprintf("SET SEARCH_PATH TO %s", pg.tconf.Schema) _, err := conn.Exec(context.Background(), setSchemaQuery) if err != nil { - utils.ErrExit("run query %q on target %q: %s", setSchemaQuery, pg.tconf.Host, err) + utils.ErrExit("run query: %q on target %q: %s", setSchemaQuery, pg.tconf.Host, err) } } @@ -751,7 +777,7 @@ func (pg *TargetPostgreSQL) isTableExists(tableNameTup sqlname.NameTuple) bool { func (pg *TargetPostgreSQL) isQueryResultNonEmpty(query string) bool { rows, err := pg.Query(query) if err != nil { - utils.ErrExit("error checking if query %s is empty: %v", query, err) + utils.ErrExit("error checking if query is empty: %q: %v", query, err) } defer rows.Close() @@ -771,7 +797,7 @@ func (pg *TargetPostgreSQL) 
ClearMigrationState(migrationUUID uuid.UUID, exportD tables := []sqlname.NameTuple{} for _, tableName := range tableNames { parts := strings.Split(tableName, ".") - objName := sqlname.NewObjectName(sqlname.POSTGRESQL, "", parts[0], parts[1]) + objName := sqlname.NewObjectName(constants.POSTGRESQL, "", parts[0], parts[1]) nt := sqlname.NameTuple{ CurrentName: objName, SourceName: objName, diff --git a/yb-voyager/src/tgtdb/postgres_test.go b/yb-voyager/src/tgtdb/postgres_test.go new file mode 100644 index 0000000000..9f820ce973 --- /dev/null +++ b/yb-voyager/src/tgtdb/postgres_test.go @@ -0,0 +1,147 @@ +//go:build integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package tgtdb + +import ( + "database/sql" + "fmt" + "strings" + "testing" + + _ "github.com/jackc/pgx/v5/stdlib" + "github.com/stretchr/testify/assert" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +func TestCreateVoyagerSchemaPG(t *testing.T) { + db, err := sql.Open("pgx", testPostgresTarget.GetConnectionString()) + assert.NoError(t, err) + defer db.Close() + + // Wait for the database to be ready + err = testutils.WaitForDBToBeReady(db) + assert.NoError(t, err) + + // Initialize the TargetYugabyteDB instance + pg := &TargetPostgreSQL{ + db: db, + } + + // Call CreateVoyagerSchema + err = pg.CreateVoyagerSchema() + assert.NoError(t, err, "CreateVoyagerSchema failed") + + expectedTables := map[string]map[string]testutils.ColumnPropertiesPG{ + BATCH_METADATA_TABLE_NAME: { + "migration_uuid": {Type: "uuid", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "data_file_name": {Type: "text", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "batch_number": {Type: "integer", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "schema_name": {Type: "text", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "table_name": {Type: "text", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "rows_imported": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + }, + EVENT_CHANNELS_METADATA_TABLE_NAME: { + "migration_uuid": {Type: "uuid", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "channel_no": {Type: "integer", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "last_applied_vsn": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_inserts": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: 
false}, IsPrimary: false}, + "num_deletes": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_updates": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + }, + EVENTS_PER_TABLE_METADATA_TABLE_NAME: { + "migration_uuid": {Type: "uuid", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "table_name": {Type: "text", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "channel_no": {Type: "integer", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "total_events": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_inserts": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_deletes": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_updates": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + }, + } + + // Validate the schema and tables + t.Run("Check all the expected tables and no extra tables", func(t *testing.T) { + testutils.CheckTableExistencePG(t, db, BATCH_METADATA_TABLE_SCHEMA, expectedTables) + }) + + // Validate columns for each table + for tableName, expectedColumns := range expectedTables { + t.Run(fmt.Sprintf("Check columns for %s table", tableName), func(t *testing.T) { + table := strings.Split(tableName, ".")[1] + testutils.CheckTableStructurePG(t, db, BATCH_METADATA_TABLE_SCHEMA, table, expectedColumns) + }) + } +} + +func TestPostgresGetNonEmptyTables(t *testing.T) { + testPostgresTarget.ExecuteSqls( + `CREATE SCHEMA test_schema`, + `CREATE TABLE test_schema.foo ( + id INT PRIMARY KEY, + name VARCHAR + );`, + `INSERT into test_schema.foo values (1, 'abc'), (2, 'xyz');`, + `CREATE TABLE test_schema.bar ( + id INT PRIMARY KEY, + name VARCHAR + );`, + `INSERT into test_schema.bar values (1, 
'abc'), (2, 'xyz');`, + `CREATE TABLE test_schema.unique_table ( + id SERIAL PRIMARY KEY, + email VARCHAR(100), + phone VARCHAR(100), + address VARCHAR(255), + UNIQUE (email, phone) -- Unique constraint on combination of columns + );`, + `CREATE TABLE test_schema.table1 ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) + );`, + `CREATE TABLE test_schema.table2 ( + id SERIAL PRIMARY KEY, + email VARCHAR(100) + );`, + `CREATE TABLE test_schema.non_pk1( + id INT, + name VARCHAR(255) + );`, + `CREATE TABLE test_schema.non_pk2( + id INT, + name VARCHAR(255) + );`) + defer testPostgresTarget.ExecuteSqls(`DROP SCHEMA test_schema CASCADE;`) + + tables := []sqlname.NameTuple{ + {CurrentName: sqlname.NewObjectName(POSTGRESQL, "test_schema", "test_schema", "foo")}, + {CurrentName: sqlname.NewObjectName(POSTGRESQL, "test_schema", "test_schema", "bar")}, + {CurrentName: sqlname.NewObjectName(POSTGRESQL, "test_schema", "test_schema", "unique_table")}, + {CurrentName: sqlname.NewObjectName(POSTGRESQL, "test_schema", "test_schema", "table1")}, + {CurrentName: sqlname.NewObjectName(POSTGRESQL, "test_schema", "test_schema", "table2")}, + {CurrentName: sqlname.NewObjectName(POSTGRESQL, "test_schema", "test_schema", "non_pk1")}, + {CurrentName: sqlname.NewObjectName(POSTGRESQL, "test_schema", "test_schema", "non_pk2")}, + } + + expectedTables := []sqlname.NameTuple{ + {CurrentName: sqlname.NewObjectName(POSTGRESQL, "test_schema", "test_schema", "foo")}, + {CurrentName: sqlname.NewObjectName(POSTGRESQL, "test_schema", "test_schema", "bar")}, + } + + actualTables := testPostgresTarget.GetNonEmptyTables(tables) + testutils.AssertEqualNameTuplesSlice(t, expectedTables, actualTables) +} diff --git a/yb-voyager/src/tgtdb/suites/yugabytedbSuite_test.go b/yb-voyager/src/tgtdb/suites/yugabytedbSuite_test.go index 834263a61e..8f51d621b2 100644 --- a/yb-voyager/src/tgtdb/suites/yugabytedbSuite_test.go +++ b/yb-voyager/src/tgtdb/suites/yugabytedbSuite_test.go @@ -1,3 +1,5 @@ +//go:build unit + /* 
Copyright (c) YugabyteDB, Inc. diff --git a/yb-voyager/src/tgtdb/target_db_interface.go b/yb-voyager/src/tgtdb/target_db_interface.go index 59e58404ed..faa3f75b8c 100644 --- a/yb-voyager/src/tgtdb/target_db_interface.go +++ b/yb-voyager/src/tgtdb/target_db_interface.go @@ -36,6 +36,7 @@ type TargetDB interface { GetVersion() string CreateVoyagerSchema() error GetNonEmptyTables(tableNames []sqlname.NameTuple) []sqlname.NameTuple + TruncateTables(tableNames []sqlname.NameTuple) error IsNonRetryableCopyError(err error) bool ImportBatch(batch Batch, args *ImportBatchArgs, exportDir string, tableSchema map[string]map[string]string) (int64, error) QuoteAttributeNames(tableNameTup sqlname.NameTuple, columns []string) ([]string, error) diff --git a/yb-voyager/src/tgtdb/yugabytedb.go b/yb-voyager/src/tgtdb/yugabytedb.go index 7683355afe..fc3a392ae9 100644 --- a/yb-voyager/src/tgtdb/yugabytedb.go +++ b/yb-voyager/src/tgtdb/yugabytedb.go @@ -33,11 +33,14 @@ import ( "github.com/google/uuid" "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" + pgconn5 "github.com/jackc/pgx/v5/pgconn" _ "github.com/jackc/pgx/v5/stdlib" + "github.com/samber/lo" log "github.com/sirupsen/logrus" "golang.org/x/exp/slices" "github.com/yugabyte/yb-voyager/yb-voyager/src/callhome" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" "github.com/yugabyte/yb-voyager/yb-voyager/src/namereg" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" @@ -77,6 +80,12 @@ func (yb *TargetYugabyteDB) Exec(query string) (int64, error) { res, err := yb.db.Exec(query) if err != nil { + var pgErr *pgconn5.PgError + if errors.As(err, &pgErr) { + if pgErr.Hint != "" || pgErr.Detail != "" { + return rowsAffected, fmt.Errorf("run query %q on target %q: %w \nHINT: %s\nDETAIL: %s", query, yb.tconf.Host, err, pgErr.Hint, pgErr.Detail) + } + } return rowsAffected, fmt.Errorf("run query %q on target %q: %w", query, yb.tconf.Host, err) } rowsAffected, err = 
res.RowsAffected() @@ -234,7 +243,9 @@ func (yb *TargetYugabyteDB) InitConnPool() error { SessionInitScript: getYBSessionInitScript(yb.tconf), } yb.connPool = NewConnectionPool(params) - log.Info("Initialized connection pool with settings: ", spew.Sdump(params)) + redactedParams := params + redactedParams.ConnUriList = utils.GetRedactedURLs(redactedParams.ConnUriList) + log.Info("Initialized connection pool with settings: ", spew.Sdump(redactedParams)) return nil } @@ -332,6 +343,9 @@ func (yb *TargetYugabyteDB) CreateVoyagerSchema() error { rows_imported BIGINT, PRIMARY KEY (migration_uuid, data_file_name, batch_number, schema_name, table_name) );`, BATCH_METADATA_TABLE_NAME), + fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN data_file_name TYPE TEXT;`, BATCH_METADATA_TABLE_NAME), + fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN schema_name TYPE TEXT;`, BATCH_METADATA_TABLE_NAME), + fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN table_name TYPE TEXT;`, BATCH_METADATA_TABLE_NAME), fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s ( migration_uuid uuid, channel_no INT, @@ -349,6 +363,7 @@ func (yb *TargetYugabyteDB) CreateVoyagerSchema() error { num_deletes BIGINT, num_updates BIGINT, PRIMARY KEY (migration_uuid, table_name, channel_no));`, EVENTS_PER_TABLE_METADATA_TABLE_NAME), + fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN table_name TYPE TEXT;`, EVENTS_PER_TABLE_METADATA_TABLE_NAME), } maxAttempts := 12 @@ -389,7 +404,7 @@ func (yb *TargetYugabyteDB) GetNonEmptyTables(tables []sqlname.NameTuple) []sqln continue } if err != nil { - utils.ErrExit("failed to check whether table %q empty: %s", table, err) + utils.ErrExit("failed to check whether table is empty: %q: %s", table, err) } result = append(result, table) } @@ -397,6 +412,19 @@ func (yb *TargetYugabyteDB) GetNonEmptyTables(tables []sqlname.NameTuple) []sqln return result } +func (yb *TargetYugabyteDB) TruncateTables(tables []sqlname.NameTuple) error { + tableNames := lo.Map(tables, func(nt sqlname.NameTuple, _ int) string { + return 
nt.ForUserQuery() + }) + commaSeparatedTableNames := strings.Join(tableNames, ", ") + query := fmt.Sprintf("TRUNCATE TABLE %s", commaSeparatedTableNames) + _, err := yb.Exec(query) + if err != nil { + return err + } + return nil +} + func (yb *TargetYugabyteDB) ImportBatch(batch Batch, args *ImportBatchArgs, exportDir string, tableSchema map[string]map[string]string) (int64, error) { var rowsAffected int64 var err error @@ -1020,7 +1048,7 @@ func getYBSessionInitScript(tconf *TargetConf) []string { func checkSessionVariableSupport(tconf *TargetConf, sqlStmt string) bool { conn, err := pgx.Connect(context.Background(), tconf.GetConnectionUri()) if err != nil { - utils.ErrExit("error while creating connection for checking session parameter(%q) support: %v", sqlStmt, err) + utils.ErrExit("error while creating connection for checking session parameter support: %q: %v", sqlStmt, err) } defer conn.Close(context.Background()) @@ -1034,7 +1062,7 @@ func checkSessionVariableSupport(tconf *TargetConf, sqlStmt string) bool { } return true } - utils.ErrExit("error while executing sqlStatement=%q: %v", sqlStmt, err) + utils.ErrExit("error while executing sqlStatement: %q: %v", sqlStmt, err) } else { log.Warnf("Warning: %q is not supported: %v", sqlStmt, err) } @@ -1047,7 +1075,7 @@ func (yb *TargetYugabyteDB) setTargetSchema(conn *pgx.Conn) { setSchemaQuery := fmt.Sprintf("SET SCHEMA '%s'", yb.tconf.Schema) _, err := conn.Exec(context.Background(), setSchemaQuery) if err != nil { - utils.ErrExit("run query %q on target %q: %s", setSchemaQuery, yb.tconf.Host, err) + utils.ErrExit("run query: %q on target %q: %s", setSchemaQuery, yb.tconf.Host, err) } // append oracle schema in the search_path for orafce @@ -1186,7 +1214,7 @@ func (yb *TargetYugabyteDB) isTableExists(tableNameTup sqlname.NameTuple) bool { func (yb *TargetYugabyteDB) isQueryResultNonEmpty(query string) bool { rows, err := yb.Query(query) if err != nil { - utils.ErrExit("error checking if query %s is empty: %v", 
query, err) + utils.ErrExit("error checking if query is empty: [%s]: %v", query, err) } defer rows.Close() @@ -1299,7 +1327,7 @@ func (yb *TargetYugabyteDB) ClearMigrationState(migrationUUID uuid.UUID, exportD tables := []sqlname.NameTuple{} for _, tableName := range tableNames { parts := strings.Split(tableName, ".") - objName := sqlname.NewObjectName(sqlname.YUGABYTEDB, "", parts[0], parts[1]) + objName := sqlname.NewObjectName(constants.YUGABYTEDB, "", parts[0], parts[1]) nt := sqlname.NameTuple{ CurrentName: objName, SourceName: objName, @@ -1369,24 +1397,55 @@ func IsCurrentUserSuperUser(tconf *TargetConf) (bool, error) { } defer conn.Close(context.Background()) - query := "SELECT rolsuper FROM pg_roles WHERE rolname=current_user" - rows, err := conn.Query(context.Background(), query) - if err != nil { - return false, fmt.Errorf("querying if user is superuser: %w", err) - } - defer rows.Close() - - var isSuperUser bool - if rows.Next() { - err = rows.Scan(&isSuperUser) + runQueryAndCheckPrivilege := func(query string) (bool, error) { + rows, err := conn.Query(context.Background(), query) if err != nil { - return false, fmt.Errorf("scanning row for superuser: %w", err) + return false, fmt.Errorf("querying if user is superuser: %w", err) } - } else { - return false, fmt.Errorf("no current user found in pg_roles") + defer rows.Close() + + var isProperUser bool + if rows.Next() { + err = rows.Scan(&isProperUser) + if err != nil { + return false, fmt.Errorf("scanning row for query: %w", err) + } + } else { + return false, fmt.Errorf("no current user found in pg_roles") + } + return isProperUser, nil + } + + //This rolsuper is set to true in the pg_roles if a user is super user + isSuperUserquery := "SELECT rolsuper FROM pg_roles WHERE rolname=current_user" + + isSuperUser, err := runQueryAndCheckPrivilege(isSuperUserquery) + if err != nil { + return false, fmt.Errorf("error checking super user privilege: %w", err) + } + if isSuperUser { + return true, nil + } + 
//In case of YugabyteDB Aeon deployment of target database we need to verify if yb_superuser is granted or not + isYbSuperUserQuery := `SELECT + CASE + WHEN EXISTS ( + SELECT 1 + FROM pg_auth_members m + JOIN pg_roles grantee ON m.member = grantee.oid + JOIN pg_roles granted ON m.roleid = granted.oid + WHERE grantee.rolname = CURRENT_USER AND granted.rolname = 'yb_superuser' + ) + THEN TRUE + ELSE FALSE + END AS is_yb_superuser;` + + isYBSuperUser, err := runQueryAndCheckPrivilege(isYbSuperUserQuery) + if err != nil { + return false, fmt.Errorf("error checking yb_superuser privilege: %w", err) } - return isSuperUser, nil + return isYBSuperUser, nil } func (yb *TargetYugabyteDB) GetEnabledTriggersAndFks() (enabledTriggers []string, enabledFks []string, err error) { diff --git a/yb-voyager/src/tgtdb/yugabytedb_test.go b/yb-voyager/src/tgtdb/yugabytedb_test.go new file mode 100644 index 0000000000..33ed248297 --- /dev/null +++ b/yb-voyager/src/tgtdb/yugabytedb_test.go @@ -0,0 +1,149 @@ +//go:build integration + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package tgtdb + +import ( + "database/sql" + "fmt" + "strings" + "testing" + + _ "github.com/jackc/pgx/v5/stdlib" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + testutils "github.com/yugabyte/yb-voyager/yb-voyager/test/utils" +) + +func TestCreateVoyagerSchemaYB(t *testing.T) { + db, err := sql.Open("pgx", testYugabyteDBTarget.GetConnectionString()) + assert.NoError(t, err) + defer db.Close() + + // Wait for the database to be ready + err = testutils.WaitForDBToBeReady(db) + assert.NoError(t, err) + + // Initialize the TargetYugabyteDB instance + yb := &TargetYugabyteDB{ + db: db, + } + + // Call CreateVoyagerSchema + err = yb.CreateVoyagerSchema() + assert.NoError(t, err, "CreateVoyagerSchema failed") + + expectedTables := map[string]map[string]testutils.ColumnPropertiesPG{ + BATCH_METADATA_TABLE_NAME: { + "migration_uuid": {Type: "uuid", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "data_file_name": {Type: "text", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "batch_number": {Type: "integer", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "schema_name": {Type: "text", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "table_name": {Type: "text", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "rows_imported": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + }, + EVENT_CHANNELS_METADATA_TABLE_NAME: { + "migration_uuid": {Type: "uuid", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "channel_no": {Type: "integer", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "last_applied_vsn": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_inserts": {Type: "bigint", IsNullable: 
"YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_deletes": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_updates": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + }, + EVENTS_PER_TABLE_METADATA_TABLE_NAME: { + "migration_uuid": {Type: "uuid", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "table_name": {Type: "text", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "channel_no": {Type: "integer", IsNullable: "NO", Default: sql.NullString{Valid: false}, IsPrimary: true}, + "total_events": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_inserts": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_deletes": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + "num_updates": {Type: "bigint", IsNullable: "YES", Default: sql.NullString{Valid: false}, IsPrimary: false}, + }, + } + + // Validate the schema and tables + t.Run("Check all the expected tables and no extra tables", func(t *testing.T) { + testutils.CheckTableExistencePG(t, db, BATCH_METADATA_TABLE_SCHEMA, expectedTables) + }) + + // Validate columns for each table + for tableName, expectedColumns := range expectedTables { + t.Run(fmt.Sprintf("Check columns for %s table", tableName), func(t *testing.T) { + table := strings.Split(tableName, ".")[1] + testutils.CheckTableStructurePG(t, db, BATCH_METADATA_TABLE_SCHEMA, table, expectedColumns) + }) + } +} + +func TestYugabyteGetNonEmptyTables(t *testing.T) { + testYugabyteDBTarget.ExecuteSqls( + `CREATE SCHEMA test_schema`, + `CREATE TABLE test_schema.foo ( + id INT PRIMARY KEY, + name VARCHAR + );`, + `INSERT into test_schema.foo values (1, 'abc'), (2, 'xyz');`, + `CREATE TABLE test_schema.bar ( + id INT PRIMARY KEY, + name VARCHAR + );`, + 
`INSERT into test_schema.bar values (1, 'abc'), (2, 'xyz');`, + `CREATE TABLE test_schema.unique_table ( + id SERIAL PRIMARY KEY, + email VARCHAR(100), + phone VARCHAR(100), + address VARCHAR(255), + UNIQUE (email, phone) + );`, + `CREATE TABLE test_schema.table1 ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) + );`, + `CREATE TABLE test_schema.table2 ( + id SERIAL PRIMARY KEY, + email VARCHAR(100) + );`, + `CREATE TABLE test_schema.non_pk1( + id INT, + name VARCHAR(255) + );`, + `CREATE TABLE test_schema.non_pk2( + id INT, + name VARCHAR(255) + );`) + defer testYugabyteDBTarget.ExecuteSqls(`DROP SCHEMA test_schema CASCADE;`) + + tables := []sqlname.NameTuple{ + {CurrentName: sqlname.NewObjectName(YUGABYTEDB, "test_schema", "test_schema", "foo")}, + {CurrentName: sqlname.NewObjectName(YUGABYTEDB, "test_schema", "test_schema", "bar")}, + {CurrentName: sqlname.NewObjectName(YUGABYTEDB, "test_schema", "test_schema", "unique_table")}, + {CurrentName: sqlname.NewObjectName(YUGABYTEDB, "test_schema", "test_schema", "table1")}, + {CurrentName: sqlname.NewObjectName(YUGABYTEDB, "test_schema", "test_schema", "table2")}, + {CurrentName: sqlname.NewObjectName(YUGABYTEDB, "test_schema", "test_schema", "non_pk1")}, + {CurrentName: sqlname.NewObjectName(YUGABYTEDB, "test_schema", "test_schema", "non_pk2")}, + } + + expectedTables := []sqlname.NameTuple{ + {CurrentName: sqlname.NewObjectName(YUGABYTEDB, "test_schema", "test_schema", "foo")}, + {CurrentName: sqlname.NewObjectName(YUGABYTEDB, "test_schema", "test_schema", "bar")}, + } + + actualTables := testYugabyteDBTarget.GetNonEmptyTables(tables) + log.Infof("non empty tables: %+v\n", actualTables) + testutils.AssertEqualNameTuplesSlice(t, expectedTables, actualTables) +} diff --git a/yb-voyager/src/utils/az/azutils.go b/yb-voyager/src/utils/az/azutils.go index b95576c709..dcbae81c55 100644 --- a/yb-voyager/src/utils/az/azutils.go +++ b/yb-voyager/src/utils/az/azutils.go @@ -38,7 +38,7 @@ func createClientIfNotExists(dataDir 
string) { var err error url, err := url.Parse(dataDir) if err != nil { - utils.ErrExit("parse azure blob url for dataDir %s: %w", dataDir, err) + utils.ErrExit("parse azure blob url: for dataDir %s: %w", dataDir, err) } serviceUrl := "https://" + url.Host // cred represents the default Oauth token used to authenticate the account in the url. diff --git a/yb-voyager/src/utils/commonVariables.go b/yb-voyager/src/utils/commonVariables.go index 4cb1e60474..38a924486e 100644 --- a/yb-voyager/src/utils/commonVariables.go +++ b/yb-voyager/src/utils/commonVariables.go @@ -19,6 +19,7 @@ import ( "sync" "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" + "github.com/yugabyte/yb-voyager/yb-voyager/src/ybversion" ) const ( @@ -73,10 +74,10 @@ var WaitChannel = make(chan int) // ================== Schema Report ============================== type SchemaReport struct { - VoyagerVersion string `json:"VoyagerVersion"` - MigrationComplexity string `json:"MigrationComplexity"` - SchemaSummary SchemaSummary `json:"Summary"` - Issues []Issue `json:"Issues"` + VoyagerVersion string `json:"VoyagerVersion"` + TargetDBVersion *ybversion.YBVersion `json:"TargetDBVersion"` + SchemaSummary SchemaSummary `json:"Summary"` + Issues []AnalyzeSchemaIssue `json:"Issues"` } type SchemaSummary struct { @@ -88,6 +89,7 @@ type SchemaSummary struct { DBObjects []DBObject `json:"DatabaseObjects"` } +// TODO: Rename the variables of TotalCount and InvalidCount -> TotalObjects and ObjectsWithIssues type DBObject struct { ObjectType string `json:"ObjectType"` TotalCount int `json:"TotalCount"` @@ -96,16 +98,32 @@ type DBObject struct { Details string `json:"Details,omitempty"` } -type Issue struct { - IssueType string `json:"IssueType"` - ObjectType string `json:"ObjectType"` - ObjectName string `json:"ObjectName"` - Reason string `json:"Reason"` - SqlStatement string `json:"SqlStatement,omitempty"` - FilePath string `json:"FilePath"` - Suggestion string `json:"Suggestion"` - GH string 
`json:"GH"` - DocsLink string `json:"DocsLink,omitempty"` +// TODO: support MinimumVersionsFixedIn in xml +type AnalyzeSchemaIssue struct { + // TODO: rename IssueType to Category + IssueType string `json:"IssueType"` //category: unsupported_features, unsupported_plpgsql_objects, etc + ObjectType string `json:"ObjectType"` + ObjectName string `json:"ObjectName"` + Reason string `json:"Reason"` + Type string `json:"-" xml:"-"` // identifier for issue type ADVISORY_LOCKS, SYSTEM_COLUMNS, etc + Impact string `json:"-" xml:"-"` // temporary field; since currently we generate assessment issue from analyze issue + SqlStatement string `json:"SqlStatement,omitempty"` + FilePath string `json:"FilePath"` + Suggestion string `json:"Suggestion"` + GH string `json:"GH"` + DocsLink string `json:"DocsLink,omitempty"` + MinimumVersionsFixedIn map[string]*ybversion.YBVersion `json:"MinimumVersionsFixedIn" xml:"-"` // key: series (2024.1, 2.21, etc) +} + +func (i AnalyzeSchemaIssue) IsFixedIn(v *ybversion.YBVersion) (bool, error) { + if i.MinimumVersionsFixedIn == nil { + return false, nil + } + minVersionFixedInSeries, ok := i.MinimumVersionsFixedIn[v.Series()] + if !ok { + return false, nil + } + return v.GreaterThanOrEqual(minVersionFixedInSeries), nil } type IndexInfo struct { @@ -124,9 +142,10 @@ type TableColumnsDataTypes struct { } type UnsupportedQueryConstruct struct { - ConstructTypeName string - Query string - DocsLink string + ConstructTypeName string + Query string + DocsLink string + MinimumVersionsFixedIn map[string]*ybversion.YBVersion // key: series (2024.1, 2.21, etc) } // ================== Segment ============================== diff --git a/yb-voyager/src/utils/jsonfile/jsonfile_test.go b/yb-voyager/src/utils/jsonfile/jsonfile_test.go index 8818734334..a0f20b57e0 100644 --- a/yb-voyager/src/utils/jsonfile/jsonfile_test.go +++ b/yb-voyager/src/utils/jsonfile/jsonfile_test.go @@ -1,3 +1,20 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ package jsonfile import ( diff --git a/yb-voyager/src/utils/logging.go b/yb-voyager/src/utils/logging.go index 74e2562bd4..c4dd17b0b0 100644 --- a/yb-voyager/src/utils/logging.go +++ b/yb-voyager/src/utils/logging.go @@ -24,7 +24,10 @@ import ( "github.com/tebeka/atexit" ) +var ErrExitErr error + func ErrExit(formatString string, args ...interface{}) { + ErrExitErr = fmt.Errorf(formatString, args...) formatString = strings.Replace(formatString, "%w", "%s", -1) fmt.Fprintf(os.Stderr, formatString+"\n", args...) log.Errorf(formatString+"\n", args...) 
diff --git a/yb-voyager/src/utils/s3/s3utils.go b/yb-voyager/src/utils/s3/s3utils.go index 5c335566b1..36cd6c7be6 100644 --- a/yb-voyager/src/utils/s3/s3utils.go +++ b/yb-voyager/src/utils/s3/s3utils.go @@ -95,7 +95,7 @@ func ListAllObjects(dataDir string) ([]string, error) { i++ page, err := p.NextPage(context.TODO()) if err != nil { - utils.ErrExit("failed to get page %v, %w", i, err) + utils.ErrExit("failed to get page %v: %w", i, err) } // Log the objects found for _, obj := range page.Contents { diff --git a/yb-voyager/src/utils/sqlname/nametuple.go b/yb-voyager/src/utils/sqlname/nametuple.go index cdfdbc80cd..3a9a3fa1b2 100644 --- a/yb-voyager/src/utils/sqlname/nametuple.go +++ b/yb-voyager/src/utils/sqlname/nametuple.go @@ -22,6 +22,7 @@ import ( "strings" "github.com/samber/lo" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" ) //================================================ @@ -185,7 +186,8 @@ func (t NameTuple) Key() string { // ================================================ func quote2(dbType, name string) string { switch dbType { - case POSTGRESQL, YUGABYTEDB, ORACLE, MYSQL: + case constants.POSTGRESQL, constants.YUGABYTEDB, + constants.ORACLE, constants.MYSQL: return `"` + name + `"` default: panic("unknown source db type " + dbType) @@ -194,15 +196,15 @@ func quote2(dbType, name string) string { func minQuote2(objectName, sourceDBType string) string { switch sourceDBType { - case YUGABYTEDB, POSTGRESQL: + case constants.YUGABYTEDB, constants.POSTGRESQL: if IsAllLowercase(objectName) && !IsReservedKeywordPG(objectName) { return objectName } else { return `"` + objectName + `"` } - case MYSQL: + case constants.MYSQL: return `"` + objectName + `"` - case ORACLE: + case constants.ORACLE: if IsAllUppercase(objectName) && !IsReservedKeywordOracle(objectName) { return objectName } else { diff --git a/yb-voyager/src/utils/sqlname/nametuple_test.go b/yb-voyager/src/utils/sqlname/nametuple_test.go index d6c207c496..b89ade4c8d 100644 --- 
a/yb-voyager/src/utils/sqlname/nametuple_test.go +++ b/yb-voyager/src/utils/sqlname/nametuple_test.go @@ -1,3 +1,5 @@ +//go:build unit + /* Copyright (c) YugabyteDB, Inc. @@ -19,17 +21,18 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" ) func TestNameTupleEquals(t *testing.T) { assert := assert.New(t) - o1 := NewObjectName(POSTGRESQL, "public", "public", "table1") - o2 := NewObjectName(POSTGRESQL, "public", "public", "table1") + o1 := NewObjectName(constants.POSTGRESQL, "public", "public", "table1") + o2 := NewObjectName(constants.POSTGRESQL, "public", "public", "table1") nameTuple1 := NameTuple{CurrentName: o1, SourceName: o1, TargetName: o1} nameTuple2 := NameTuple{CurrentName: o2, SourceName: o2, TargetName: o2} assert.True(nameTuple1.Equals(nameTuple2)) - o3 := NewObjectName(POSTGRESQL, "public", "public", "table2") + o3 := NewObjectName(constants.POSTGRESQL, "public", "public", "table2") nameTuple3 := NameTuple{CurrentName: o3, SourceName: o3, TargetName: o3} assert.False(nameTuple1.Equals(nameTuple3)) } @@ -39,7 +42,7 @@ func TestPGDefaultSchemaCaseInsensitiveTableName(t *testing.T) { // Test NewTableName() with PostgreSQL and default schema "public" and // a table name belonging to default schema. - tableName := NewObjectName(POSTGRESQL, "public", "public", "table1") + tableName := NewObjectName(constants.POSTGRESQL, "public", "public", "table1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "public", @@ -68,7 +71,7 @@ func TestPGNonDefaultSchemaCaseInsensitiveTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with PostgreSQL and default schema "public" and // a table name belonging to a non-default schema. 
- tableName := NewObjectName(POSTGRESQL, "public", "schema1", "table1") + tableName := NewObjectName(constants.POSTGRESQL, "public", "schema1", "table1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "schema1", @@ -97,7 +100,7 @@ func TestPGDefaultSchemaCaseSensitiveTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with PostgreSQL and default schema "public" and // a case-sensitive name with mixed cases. - tableName := NewObjectName(POSTGRESQL, "public", "public", "Table1") + tableName := NewObjectName(constants.POSTGRESQL, "public", "public", "Table1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "public", @@ -126,7 +129,7 @@ func TestPGNonDefaultSchemaCaseSensitiveTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with PostgreSQL and default schema "public" and // a case-sensitive name with mixed cases. - tableName := NewObjectName(POSTGRESQL, "public", "schema1", "Table1") + tableName := NewObjectName(constants.POSTGRESQL, "public", "schema1", "Table1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "schema1", @@ -155,7 +158,7 @@ func TestPGNonDefaultSchemaTableNameWithSpecialChars(t *testing.T) { assert := assert.New(t) // Test NewTableName() with PostgreSQL and default schema "public" and // a case-sensitive name with mixed cases. - tableName := NewObjectName(POSTGRESQL, "public", "schema1", "table$1") + tableName := NewObjectName(constants.POSTGRESQL, "public", "schema1", "table$1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "schema1", @@ -186,7 +189,7 @@ func TestOracleDefaultSchemaCaseInsensitiveTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with Oracle and default schema "SAKILA" and // a table name belonging to default schema. 
- tableName := NewObjectName(ORACLE, "SAKILA", "SAKILA", "TABLE1") + tableName := NewObjectName(constants.ORACLE, "SAKILA", "SAKILA", "TABLE1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "SAKILA", @@ -215,7 +218,7 @@ func TestOracleNonDefaultSchemaCaseInsensitiveTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with Oracle and default schema "SAKILA" and // a table name belonging to a non-default schema. - tableName := NewObjectName(ORACLE, "SAKILA", "SCHEMA1", "TABLE1") + tableName := NewObjectName(constants.ORACLE, "SAKILA", "SCHEMA1", "TABLE1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "SCHEMA1", @@ -244,7 +247,7 @@ func TestOracleDefaultSchemaCaseSensitiveTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with Oracle and default schema "SAKILA" and // a case-sensitive name with mixed cases. - tableName := NewObjectName(ORACLE, "SAKILA", "SAKILA", "Table1") + tableName := NewObjectName(constants.ORACLE, "SAKILA", "SAKILA", "Table1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "SAKILA", @@ -273,7 +276,7 @@ func TestOracleNonDefaultSchemaCaseSensitiveTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with Oracle and default schema "SAKILA" and // a case-sensitive name with mixed cases. - tableName := NewObjectName(ORACLE, "SAKILA", "SCHEMA1", "Table1") + tableName := NewObjectName(constants.ORACLE, "SAKILA", "SCHEMA1", "Table1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "SCHEMA1", @@ -304,7 +307,7 @@ func TestMySQLDefaultSchemaCaseSensitiveLowerCaseTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with MySQL and default schema "sakila" and // a table name belonging to default schema. 
- tableName := NewObjectName(MYSQL, "sakila", "sakila", "table1") + tableName := NewObjectName(constants.MYSQL, "sakila", "sakila", "table1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "sakila", @@ -333,7 +336,7 @@ func TestMySQLNonDefaultSchemaCaseSensitiveLowerCaseTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with MySQL and default schema "sakila" and // a table name belonging to a non-default schema. - tableName := NewObjectName(MYSQL, "sakila", "schema1", "table1") + tableName := NewObjectName(constants.MYSQL, "sakila", "schema1", "table1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "schema1", @@ -362,7 +365,7 @@ func TestMySQLDefaultSchemaCaseSensitiveMixedCaseTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with MySQL and default schema "sakila" and // a case-sensitive name with mixed cases. - tableName := NewObjectName(MYSQL, "sakila", "sakila", "Table1") + tableName := NewObjectName(constants.MYSQL, "sakila", "sakila", "Table1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "sakila", @@ -391,7 +394,7 @@ func TestMySQLNonDefaultSchemaCaseSensitiveUpperCaseTableName(t *testing.T) { assert := assert.New(t) // Test NewTableName() with MySQL and default schema "sakila" and // a case-sensitive name with all upper case letters. 
- tableName := NewObjectName(MYSQL, "sakila", "schema1", "TABLE1") + tableName := NewObjectName(constants.MYSQL, "sakila", "schema1", "TABLE1") assert.NotNil(tableName) expectedTableName := &ObjectName{ SchemaName: "schema1", diff --git a/yb-voyager/src/utils/sqlname/sqlname.go b/yb-voyager/src/utils/sqlname/sqlname.go index 95d8911f44..36bb938eda 100644 --- a/yb-voyager/src/utils/sqlname/sqlname.go +++ b/yb-voyager/src/utils/sqlname/sqlname.go @@ -20,16 +20,10 @@ import ( "strings" "unicode" + "github.com/yugabyte/yb-voyager/yb-voyager/src/constants" "golang.org/x/exp/slices" ) -const ( - YUGABYTEDB = "yugabytedb" - POSTGRESQL = "postgresql" - ORACLE = "oracle" - MYSQL = "mysql" -) - var ( SourceDBType string PreserveCase bool @@ -113,9 +107,9 @@ func NewTargetName(schemaName, objectName string) *TargetName { } return &TargetName{ ObjectName: Identifier{ - Quoted: quote(objectName, YUGABYTEDB), - Unquoted: unquote(objectName, YUGABYTEDB), - MinQuoted: minQuote(objectName, YUGABYTEDB), + Quoted: quote(objectName, constants.YUGABYTEDB), + Unquoted: unquote(objectName, constants.YUGABYTEDB), + MinQuoted: minQuote(objectName, constants.YUGABYTEDB), }, SchemaName: Identifier{ Quoted: `"` + schemaName + `"`, @@ -123,9 +117,9 @@ func NewTargetName(schemaName, objectName string) *TargetName { MinQuoted: schemaName, }, Qualified: Identifier{ - Quoted: schemaName + "." + quote(objectName, YUGABYTEDB), - Unquoted: schemaName + "." + unquote(objectName, YUGABYTEDB), - MinQuoted: schemaName + "." + minQuote(objectName, YUGABYTEDB), + Quoted: schemaName + "." + quote(objectName, constants.YUGABYTEDB), + Unquoted: schemaName + "." + unquote(objectName, constants.YUGABYTEDB), + MinQuoted: schemaName + "." 
+ minQuote(objectName, constants.YUGABYTEDB), }, } } @@ -160,17 +154,17 @@ func IsQuoted(s string) bool { func quote(s string, dbType string) string { if IsQuoted(s) { - if s[0] == '`' && dbType == YUGABYTEDB { + if s[0] == '`' && dbType == constants.YUGABYTEDB { return `"` + unquote(s, dbType) + `"` // `Foo` -> "Foo" } return s } switch dbType { - case POSTGRESQL, YUGABYTEDB: + case constants.POSTGRESQL, constants.YUGABYTEDB: return `"` + strings.ToLower(s) + `"` - case MYSQL: + case constants.MYSQL: return s // TODO - learn the semantics of quoting in MySQL. - case ORACLE: + case constants.ORACLE: return `"` + strings.ToUpper(s) + `"` default: panic("unknown source db type " + dbType) @@ -182,11 +176,11 @@ func unquote(s string, dbType string) string { return s[1 : len(s)-1] } switch dbType { - case POSTGRESQL, YUGABYTEDB: + case constants.POSTGRESQL, constants.YUGABYTEDB: return strings.ToLower(s) - case MYSQL: + case constants.MYSQL: return s - case ORACLE: + case constants.ORACLE: return strings.ToUpper(s) default: panic("unknown source db type") @@ -210,15 +204,15 @@ func SetDifference(a, b []*SourceName) []*SourceName { func minQuote(objectName, sourceDBType string) string { objectName = unquote(objectName, sourceDBType) switch sourceDBType { - case YUGABYTEDB, POSTGRESQL: + case constants.YUGABYTEDB, constants.POSTGRESQL: if IsAllLowercase(objectName) && !IsReservedKeywordPG(objectName) { return objectName } else { return `"` + objectName + `"` } - case MYSQL: + case constants.MYSQL: return objectName - case ORACLE: + case constants.ORACLE: if IsAllUppercase(objectName) && !IsReservedKeywordOracle(objectName) { return objectName } else { @@ -246,7 +240,7 @@ func IsAllLowercase(s string) bool { if !(unicode.IsLetter(rune(c)) || unicode.IsDigit(rune(c))) { // check for special chars return false } - if unicode.IsUpper(rune(c)) { + if unicode.IsUpper(rune(c)) { return false } } @@ -255,11 +249,11 @@ func IsAllLowercase(s string) bool { func IsCaseSensitive(s 
string, sourceDbType string) bool { switch sourceDbType { - case ORACLE: + case constants.ORACLE: return !IsAllUppercase(s) - case POSTGRESQL: + case constants.POSTGRESQL: return !IsAllLowercase(s) - case MYSQL: + case constants.MYSQL: return false } panic("invalid source db type") diff --git a/yb-voyager/src/utils/struct_map_test.go b/yb-voyager/src/utils/struct_map_test.go index 788251706e..c41f90a7d7 100644 --- a/yb-voyager/src/utils/struct_map_test.go +++ b/yb-voyager/src/utils/struct_map_test.go @@ -1,3 +1,20 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ package utils import ( diff --git a/yb-voyager/src/utils/utils.go b/yb-voyager/src/utils/utils.go index 2bb1a9bd4c..c9967e455b 100644 --- a/yb-voyager/src/utils/utils.go +++ b/yb-voyager/src/utils/utils.go @@ -24,6 +24,7 @@ import ( "net" "net/url" "os" + "os/exec" "path/filepath" "regexp" "sort" @@ -32,10 +33,13 @@ import ( "syscall" "time" + "github.com/fatih/color" "github.com/google/uuid" "github.com/samber/lo" log "github.com/sirupsen/logrus" "golang.org/x/exp/slices" + "golang.org/x/text/cases" + "golang.org/x/text/language" ) var DoNotPrompt bool @@ -430,7 +434,8 @@ func GetRedactedURLs(urlList []string) []string { for _, u := range urlList { obj, err := url.Parse(u) if err != nil { - ErrExit("invalid URL: %q", u) + log.Error("error redacting connection url: invalid connection URL") + fmt.Printf("error redacting connection url: invalid connection URL: %v", u) } result = append(result, obj.Redacted()) } @@ -445,11 +450,15 @@ func GetSqlStmtToPrint(stmt string) string { } } -func PrintSqlStmtIfDDL(stmt string, fileName string) { +func PrintSqlStmtIfDDL(stmt string, fileName string, noticeMsg string) { setOrSelectStmt := strings.HasPrefix(strings.ToUpper(stmt), "SET ") || strings.HasPrefix(strings.ToUpper(stmt), "SELECT ") if !setOrSelectStmt { fmt.Printf("%s: %s\n", fileName, GetSqlStmtToPrint(stmt)) + if noticeMsg != "" { + fmt.Printf(color.YellowString("%s\n", noticeMsg)) + log.Infof("notice for %q: %s", GetSqlStmtToPrint(stmt), noticeMsg) + } } } @@ -684,3 +693,67 @@ func ChangeFileExtension(filePath string, newExt string) string { return filePath + newExt } + +// Port 0 generally returns port number in range 30xxx - 60xxx but it also depends on OS and network configuration +func GetFreePort() (int, error) { + // Listen on port 0, which tells the OS to assign an available port + listener, err := net.Listen("tcp", ":0") + if err != nil { + return 0, fmt.Errorf("failed to listen on a port: %v", err) + } + defer listener.Close() + + // Retrieve the 
 assigned port + addr := listener.Addr().(*net.TCPAddr) + return addr.Port, nil +} + +func GetFinalReleaseVersionFromRCVersion(msrVoyagerFinalVersion string) (string, error) { + // RC version will be like 0rc1.1.8.6 + // We need to extract 1.8.6 from it + // Comparing with this 1.8.6 should be enough to check if the version is compatible with the current version + // Split the string at "rc" to isolate the part after it + parts := strings.Split(msrVoyagerFinalVersion, "rc") + if len(parts) > 1 { + // Further split the remaining part by '.' and remove the first segment + versionParts := strings.Split(parts[1], ".") + if len(versionParts) > 1 { + msrVoyagerFinalVersion = strings.Join(versionParts[1:], ".") // Join the parts after the first one + } else { + return "", fmt.Errorf("unexpected version format %q", msrVoyagerFinalVersion) + } + } else { + return "", fmt.Errorf("unexpected version format %q", msrVoyagerFinalVersion) + } + return msrVoyagerFinalVersion, nil +} + +// Return list of missing tools from the provided list of tools +func CheckTools(tools ...string) []string { + var missingTools []string + for _, tool := range tools { + execPath, err := exec.LookPath(tool) + if err != nil { + missingTools = append(missingTools, tool) + } else { + log.Infof("Found %s at %s", tool, execPath) + } + } + + return missingTools +} + +func BuildObjectName(schemaName, objName string) string { + return lo.Ternary(schemaName != "", schemaName+"."+objName, objName) +} + +// SnakeCaseToTitleCase converts a snake_case string to a title case string with spaces. 
+func SnakeCaseToTitleCase(snake string) string { + words := strings.Split(snake, "_") + c := cases.Title(language.English) + for i, word := range words { + words[i] = c.String(word) + } + + return strings.Join(words, " ") +} diff --git a/yb-voyager/src/utils/version.go b/yb-voyager/src/utils/version.go index 365e5d715f..d751133c1d 100644 --- a/yb-voyager/src/utils/version.go +++ b/yb-voyager/src/utils/version.go @@ -19,6 +19,9 @@ const ( // This constant must be updated on every release. YB_VOYAGER_VERSION = "main" + // This constant must be updated after every breaking change. + PREVIOUS_BREAKING_CHANGE_VERSION = "1.8.5" + // @Refer: https://icinga.com/blog/2022/05/25/embedding-git-commit-information-in-go-binaries/ GIT_COMMIT_HASH = "$Format:%H$" ) diff --git a/yb-voyager/src/ybversion/constants.go b/yb-voyager/src/ybversion/constants.go new file mode 100644 index 0000000000..94bbfffa94 --- /dev/null +++ b/yb-voyager/src/ybversion/constants.go @@ -0,0 +1,56 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package ybversion + +const ( + SERIES_2_14 = "2.14" + SERIES_2_18 = "2.18" + SERIES_2_20 = "2.20" + SERIES_2024_1 = "2024.1" + SERIES_2024_2 = "2024.2" + SERIES_2_21 = "2.21" + SERIES_2_23 = "2.23" +) + +var LatestStable *YBVersion + +var V2024_1_0_0 *YBVersion +var V2024_1_3_1 *YBVersion +var V2024_2_0_0 *YBVersion + +var V2_23_0_0 *YBVersion + +func init() { + var err error + V2024_1_0_0, err = NewYBVersion("2024.1.0.0") + if err != nil { + panic("could not create version 2024.1.0.0") + } + V2024_1_3_1, err = NewYBVersion("2024.1.3.1") + if err != nil { + panic("could not create version 2024.1.3.1") + } + V2024_2_0_0, err = NewYBVersion("2024.2.0.0") + if err != nil { + panic("could not create version 2024.2.0.0") + } + + V2_23_0_0, err = NewYBVersion("2.23.0.0") + if err != nil { + panic("could not create version 2.23.0.0") + } + LatestStable = V2024_2_0_0 +} diff --git a/yb-voyager/src/ybversion/yb_version.go b/yb-voyager/src/ybversion/yb_version.go new file mode 100644 index 0000000000..fad15e0863 --- /dev/null +++ b/yb-voyager/src/ybversion/yb_version.go @@ -0,0 +1,136 @@ +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ybversion + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/go-version" + "github.com/samber/lo" + "golang.org/x/exp/slices" +) + +// Reference - https://docs.yugabyte.com/preview/releases/ybdb-releases/ +var supportedYBVersionStableSeriesOld = []string{SERIES_2_14, SERIES_2_18, SERIES_2_20} +var supportedYBVersionStableSeries = []string{SERIES_2024_1, SERIES_2024_2} +var supportedYBVersionPreviewSeries = []string{SERIES_2_21, SERIES_2_23} + +var allSupportedYBVersionSeries = lo.Flatten([][]string{supportedYBVersionStableSeries, supportedYBVersionPreviewSeries, supportedYBVersionStableSeriesOld}) +var ErrUnsupportedSeries = fmt.Errorf("unsupported YB version series. Supported YB version series = %v", allSupportedYBVersionSeries) + +const ( + STABLE = "stable" + PREVIEW = "preview" + STABLE_OLD = "stable_old" +) + +/* +YBVersion is a wrapper around hashicorp/go-version.Version that adds some Yugabyte-specific +functionality. + 1. It only supports versions with 4 segments (A.B.C.D). No build number is allowed. + 2. It only accepts one of supported Yugabyte version series. +*/ +type YBVersion struct { + *version.Version +} + +func NewYBVersion(v string) (*YBVersion, error) { + v1, err := version.NewVersion(v) + if err != nil { + return nil, err + } + + // Do not allow build number in the version. for example, 2024.1.1.0-b123 + if v1.Prerelease() != "" { + return nil, fmt.Errorf("invalid YB version: %s. Build number is not supported. Version should be of format (A.B.C.D) ", v) + } + + ybv := &YBVersion{v1} + origSegLen := ybv.OriginalSegmentsLen() + if origSegLen != 4 { + return nil, fmt.Errorf("invalid YB version: %s. It has %d segments. 
Version should have exactly 4 segments (A.B.C.D).", v, origSegLen) + } + + if !slices.Contains(allSupportedYBVersionSeries, ybv.Series()) { + return nil, ErrUnsupportedSeries + } + return ybv, nil +} + +// The first two segments essentially represent the release series +// as per https://docs.yugabyte.com/preview/releases/ybdb-releases/ +func (ybv *YBVersion) Series() string { + return joinIntsWith(ybv.Segments()[:2], ".") +} + +func (ybv *YBVersion) ReleaseType() string { + series := ybv.Series() + if slices.Contains(supportedYBVersionStableSeries, series) { + return STABLE + } else if slices.Contains(supportedYBVersionPreviewSeries, series) { + return PREVIEW + } else if slices.Contains(supportedYBVersionStableSeriesOld, series) { + return STABLE_OLD + } else { + panic("unknown release type for series: " + series) + } +} + +// This returns the len of the segments in the original +// input. For instance if input is 2024.1, +// go-version.Version.Segments() will return [2024, 1, 0, 0] +// original = 2024.1 +// originalSegmentsLen = 2 ([2024,1]) +func (ybv *YBVersion) OriginalSegmentsLen() int { + orig := ybv.Original() + segments := strings.Split(orig, ".") + return len(segments) +} + +func (ybv *YBVersion) GreaterThanOrEqual(other *YBVersion) bool { + return ybv.Version.GreaterThanOrEqual(other.Version) +} + +func (ybv *YBVersion) Equal(other *YBVersion) bool { + return ybv.Version.Equal(other.Version) +} + +func (ybv *YBVersion) String() string { + return ybv.Original() +} + +// override the UnmarshalText method of Version. +// UnmarshalText implements encoding.TextUnmarshaler interface. 
+func (ybv *YBVersion) UnmarshalText(b []byte) error { + temp, err := NewYBVersion(string(b)) + if err != nil { + return err + } + + *ybv = *temp + return nil +} + +func joinIntsWith(ints []int, delimiter string) string { + strs := make([]string, len(ints)) + for i, v := range ints { + strs[i] = strconv.Itoa(v) + } + return strings.Join(strs, delimiter) +} diff --git a/yb-voyager/src/ybversion/yb_version_test.go b/yb-voyager/src/ybversion/yb_version_test.go new file mode 100644 index 0000000000..e5e67b04ef --- /dev/null +++ b/yb-voyager/src/ybversion/yb_version_test.go @@ -0,0 +1,127 @@ +//go:build unit + +/* +Copyright (c) YugabyteDB, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ybversion + +import ( + "encoding/json" + "io" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidNewYBVersion(t *testing.T) { + validVersionStrings := []string{ + "2024.1.1.0", + "2.20.7.0", + "2.21.2.1", + } + for _, v := range validVersionStrings { + _, err := NewYBVersion(v) + assert.NoError(t, err) + } +} + +func TestInvalidNewYBVersion(t *testing.T) { + invalidVersionStrings := []string{ + "abc.def", // has to be numbers + "2024.0.1-1", // has to be in supported series + "2024", // has to have 4 segments + "2.20.7", // has to have 4 segments + "2024.1.1.1.1.1", // exactly 4 segments + "2024.1.0.0-b123", // build number is not allowed. 
+ } + for _, v := range invalidVersionStrings { + _, err := NewYBVersion(v) + assert.Errorf(t, err, "expected error for %q", v) + } +} + +func TestStableReleaseType(t *testing.T) { + stableVersionStrings := []string{ + "2024.1.1.0", + "2024.1.0.0", + "2024.1.1.1", + } + for _, v := range stableVersionStrings { + ybVersion, _ := NewYBVersion(v) + assert.Equal(t, STABLE, ybVersion.ReleaseType()) + } +} + +func TestPreviewReleaseType(t *testing.T) { + previewVersionStrings := []string{ + "2.21.1.0", + "2.21.1.1", + } + for _, v := range previewVersionStrings { + ybVersion, _ := NewYBVersion(v) + assert.Equal(t, PREVIEW, ybVersion.ReleaseType()) + } +} + +func TestStableOldReleaseType(t *testing.T) { + stableOldVersionStrings := []string{ + "2.20.1.0", + "2.20.0.0", + } + for _, v := range stableOldVersionStrings { + ybVersion, _ := NewYBVersion(v) + assert.Equal(t, STABLE_OLD, ybVersion.ReleaseType()) + } +} + +func TestLatestStable(t *testing.T) { + type Release struct { + Name string `json:"name"` + } + + url := "https://api.github.com/repos/yugabyte/yugabyte-db/releases" + response, err := http.Get(url) + assert.NoErrorf(t, err, "could not access URL:%q", url) + defer response.Body.Close() + if response.StatusCode == 403 { + t.Skip("skipping test; rate limit exceeded") + } + assert.Equal(t, 200, response.StatusCode) + + body, err := io.ReadAll(response.Body) + assert.NoError(t, err, "could not read contents of response %q") + var releases []Release + err = json.Unmarshal(body, &releases) + assert.NoErrorf(t, err, "could not unmarshal response %q", string(body)) + assert.NotEmpty(t, releases, "no releases found") + + for _, r := range releases { + // sample - v2.20.7.1 (Released October 16, 2024) + releaseName := r.Name + releaseName = strings.Split(releaseName, " ")[0] + if releaseName[0] == 'v' { + releaseName = releaseName[1:] + } + releaseName = strings.Trim(releaseName, " ") + rVersion, err := NewYBVersion(releaseName) + assert.NoErrorf(t, err, "could not 
create version %q", releaseName) + if rVersion.ReleaseType() == STABLE { + assert.True(t, LatestStable.GreaterThanOrEqual(rVersion), "%s is not greater than %s", LatestStable, rVersion) + } + } +} diff --git a/yb-voyager/staticcheck.conf b/yb-voyager/staticcheck.conf new file mode 100644 index 0000000000..68d5a39312 --- /dev/null +++ b/yb-voyager/staticcheck.conf @@ -0,0 +1,2 @@ +# Disable the S1008 check to allow more explicit boolean returns +checks = ["-S1008"] \ No newline at end of file diff --git a/yb-voyager/test/containers/helpers.go b/yb-voyager/test/containers/helpers.go new file mode 100644 index 0000000000..cd3afee88d --- /dev/null +++ b/yb-voyager/test/containers/helpers.go @@ -0,0 +1,60 @@ +package testcontainers + +import ( + "context" + _ "embed" + "fmt" + "io" + + log "github.com/sirupsen/logrus" + + "github.com/testcontainers/testcontainers-go" +) + +const ( + DEFAULT_PG_PORT = "5432" + DEFAULT_YB_PORT = "5433" + DEFAULT_ORACLE_PORT = "1521" + DEFAULT_MYSQL_PORT = "3306" + + POSTGRESQL = "postgresql" + YUGABYTEDB = "yugabytedb" + ORACLE = "oracle" + MYSQL = "mysql" +) + +//go:embed test_schemas/postgresql_schema.sql +var postgresInitSchemaFile []byte + +//go:embed test_schemas/oracle_schema.sql +var oracleInitSchemaFile []byte + +//go:embed test_schemas/mysql_schema.sql +var mysqlInitSchemaFile []byte + +//go:embed test_schemas/yugabytedb_schema.sql +var yugabytedbInitSchemaFile []byte + +func printContainerLogs(container testcontainers.Container) { + if container == nil { + log.Printf("Cannot fetch logs: container is nil") + return + } + + containerID := container.GetContainerID() + logs, err := container.Logs(context.Background()) + if err != nil { + log.Printf("Error fetching logs for container %s: %v", containerID, err) + return + } + defer logs.Close() + + // Read the logs + logData, err := io.ReadAll(logs) + if err != nil { + log.Printf("Error reading logs for container %s: %v", containerID, err) + return + } + + fmt.Printf("=== Logs for 
container %s ===\n%s\n=== End of Logs for container %s ===\n", containerID, string(logData), containerID) +} diff --git a/yb-voyager/test/containers/mysql_container.go b/yb-voyager/test/containers/mysql_container.go new file mode 100644 index 0000000000..c2d8930cff --- /dev/null +++ b/yb-voyager/test/containers/mysql_container.go @@ -0,0 +1,149 @@ +package testcontainers + +import ( + "context" + "database/sql" + "fmt" + "os" + "time" + + "github.com/docker/go-connections/nat" + log "github.com/sirupsen/logrus" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" +) + +type MysqlContainer struct { + ContainerConfig + container testcontainers.Container + db *sql.DB +} + +func (ms *MysqlContainer) Start(ctx context.Context) (err error) { + if ms.container != nil { + utils.PrintAndLog("Mysql-%s container already running", ms.DBVersion) + return nil + } + + // since these Start() can be called from anywhere so need a way to ensure that correct files(without needing abs path) are picked from project directories + tmpFile, err := os.CreateTemp(os.TempDir(), "mysql_schema.sql") + if err != nil { + return fmt.Errorf("failed to create temp schema file: %w", err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write(mysqlInitSchemaFile); err != nil { + return fmt.Errorf("failed to write to temp schema file: %w", err) + } + + req := testcontainers.ContainerRequest{ + // TODO: verify the docker images being used are the correct/certified ones + Image: fmt.Sprintf("mysql:%s", ms.DBVersion), + ExposedPorts: []string{"3306/tcp"}, + Env: map[string]string{ + "MYSQL_ROOT_PASSWORD": ms.Password, + "MYSQL_USER": ms.User, + "MYSQL_PASSWORD": ms.Password, + "MYSQL_DATABASE": ms.DBName, + }, + WaitingFor: wait.ForListeningPort("3306/tcp").WithStartupTimeout(2 * time.Minute).WithPollInterval(5 * time.Second), + Files: []testcontainers.ContainerFile{ + { + HostFilePath: 
tmpFile.Name(), + ContainerFilePath: "docker-entrypoint-initdb.d/mysql_schema.sql", + FileMode: 0755, + }, + }, + } + + ms.container, err = testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + printContainerLogs(ms.container) + if err != nil { + return err + } + + dsn := ms.GetConnectionString() + db, err := sql.Open("mysql", dsn) + if err != nil { + return fmt.Errorf("failed to open mysql connection: %w", err) + } + + if err = db.Ping(); err != nil { + db.Close() + return fmt.Errorf("failed to ping mysql after connection: %w", err) + } + + // Store the DB connection for reuse + ms.db = db + + return nil +} + +func (ms *MysqlContainer) Terminate(ctx context.Context) { + if ms == nil { + return + } + + // Close the DB connection if it exists + if ms.db != nil { + if err := ms.db.Close(); err != nil { + log.Errorf("failed to close mysql db connection: %v", err) + } + } + + err := ms.container.Terminate(ctx) + if err != nil { + log.Errorf("failed to terminate mysql container: %v", err) + } +} + +func (ms *MysqlContainer) GetHostPort() (string, int, error) { + if ms.container == nil { + return "", -1, fmt.Errorf("mysql container is not started: nil") + } + + ctx := context.Background() + host, err := ms.container.Host(ctx) + if err != nil { + return "", -1, fmt.Errorf("failed to fetch host for mysql container: %w", err) + } + + port, err := ms.container.MappedPort(ctx, nat.Port(DEFAULT_MYSQL_PORT)) + if err != nil { + return "", -1, fmt.Errorf("failed to fetch mapped port for mysql container: %w", err) + } + + return host, port.Int(), nil +} + +func (ms *MysqlContainer) GetConfig() ContainerConfig { + return ms.ContainerConfig +} + +func (ms *MysqlContainer) GetConnectionString() string { + host, port, err := ms.GetHostPort() + if err != nil { + utils.ErrExit("failed to get host port for mysql connection string: %v", err) + } + + // DSN format: user:password@tcp(host:port)/dbname + return 
fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", + ms.User, ms.Password, host, port, ms.DBName) +} + +func (ms *MysqlContainer) ExecuteSqls(sqls ...string) { + if ms.db == nil { + utils.ErrExit("db connection not initialized for mysql container") + } + + for _, sqlStmt := range sqls { + _, err := ms.db.Exec(sqlStmt) + if err != nil { + utils.ErrExit("failed to execute sql '%s': %w", sqlStmt, err) + } + } +} diff --git a/yb-voyager/test/containers/oracle_container.go b/yb-voyager/test/containers/oracle_container.go new file mode 100644 index 0000000000..8fb8218de2 --- /dev/null +++ b/yb-voyager/test/containers/oracle_container.go @@ -0,0 +1,107 @@ +package testcontainers + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/docker/go-connections/nat" + log "github.com/sirupsen/logrus" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" +) + +type OracleContainer struct { + ContainerConfig + container testcontainers.Container +} + +func (ora *OracleContainer) Start(ctx context.Context) (err error) { + if ora.container != nil { + utils.PrintAndLog("Oracle-%s container already running", ora.DBVersion) + return nil + } + + // since these Start() can be called from anywhere so need a way to ensure that correct files(without needing abs path) are picked from project directories + tmpFile, err := os.CreateTemp(os.TempDir(), "oracle_schema.sql") + if err != nil { + return fmt.Errorf("failed to create temp schema file: %w", err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write(oracleInitSchemaFile); err != nil { + return fmt.Errorf("failed to write to temp schema file: %w", err) + } + + // refer: https://hub.docker.com/r/gvenzl/oracle-xe + req := testcontainers.ContainerRequest{ + // TODO: verify the docker images being used are the correct/certified ones (No license issue) + Image: fmt.Sprintf("gvenzl/oracle-xe:%s", ora.DBVersion), + ExposedPorts: 
[]string{"1521/tcp"}, + Env: map[string]string{ + "ORACLE_PASSWORD": ora.Password, // for SYS user + "ORACLE_DATABASE": ora.DBName, + "APP_USER": ora.User, + "APP_USER_PASSWORD": ora.Password, + }, + WaitingFor: wait.ForLog("DATABASE IS READY TO USE").WithStartupTimeout(2 * time.Minute).WithPollInterval(5 * time.Second), + Files: []testcontainers.ContainerFile{ + { + HostFilePath: tmpFile.Name(), + ContainerFilePath: "docker-entrypoint-initdb.d/oracle_schema.sql", + FileMode: 0755, + }, + }, + } + + ora.container, err = testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + printContainerLogs(ora.container) + return err +} + +func (ora *OracleContainer) Terminate(ctx context.Context) { + if ora == nil { + return + } + + err := ora.container.Terminate(ctx) + if err != nil { + log.Errorf("failed to terminate oracle container: %v", err) + } +} + +func (ora *OracleContainer) GetHostPort() (string, int, error) { + if ora.container == nil { + return "", -1, fmt.Errorf("oracle container is not started: nil") + } + + ctx := context.Background() + host, err := ora.container.Host(ctx) + if err != nil { + return "", -1, fmt.Errorf("failed to fetch host for oracle container: %w", err) + } + + port, err := ora.container.MappedPort(ctx, nat.Port(DEFAULT_ORACLE_PORT)) + if err != nil { + return "", -1, fmt.Errorf("failed to fetch mapped port for oracle container: %w", err) + } + + return host, port.Int(), nil +} + +func (ora *OracleContainer) GetConfig() ContainerConfig { + return ora.ContainerConfig +} + +func (ora *OracleContainer) GetConnectionString() string { + panic("GetConnectionString() not implemented yet for oracle") +} + +func (ora *OracleContainer) ExecuteSqls(sqls ...string) { + +} diff --git a/yb-voyager/test/containers/postgres_container.go b/yb-voyager/test/containers/postgres_container.go new file mode 100644 index 0000000000..539763f7de --- /dev/null +++ 
b/yb-voyager/test/containers/postgres_container.go @@ -0,0 +1,151 @@ +package testcontainers + +import ( + "context" + "database/sql" + "fmt" + "os" + "time" + + "github.com/docker/go-connections/nat" + log "github.com/sirupsen/logrus" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" +) + +type PostgresContainer struct { + ContainerConfig + container testcontainers.Container + db *sql.DB +} + +func (pg *PostgresContainer) Start(ctx context.Context) (err error) { + if pg.container != nil { + utils.PrintAndLog("Postgres-%s container already running", pg.DBVersion) + return nil + } + + // since these Start() can be called from anywhere so need a way to ensure that correct files(without needing abs path) are picked from project directories + tmpFile, err := os.CreateTemp(os.TempDir(), "postgresql_schema.sql") + if err != nil { + return fmt.Errorf("failed to create temp schema file: %w", err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write(postgresInitSchemaFile); err != nil { + return fmt.Errorf("failed to write to temp schema file: %w", err) + } + + req := testcontainers.ContainerRequest{ + // TODO: verify the docker images being used are the correct/certified ones + Image: fmt.Sprintf("postgres:%s", pg.DBVersion), + ExposedPorts: []string{"5432/tcp"}, + Env: map[string]string{ + "POSTGRES_USER": pg.User, + "POSTGRES_PASSWORD": pg.Password, + "POSTGRES_DB": pg.DBName, // NOTE: PG image makes the database with same name as user if not specific + }, + WaitingFor: wait.ForAll( + wait.ForListeningPort("5432/tcp").WithStartupTimeout(2*time.Minute).WithPollInterval(5*time.Second), + wait.ForLog("database system is ready to accept connections").WithStartupTimeout(3*time.Minute).WithPollInterval(5*time.Second), + ), + Files: []testcontainers.ContainerFile{ + { + HostFilePath: tmpFile.Name(), + ContainerFilePath: 
"docker-entrypoint-initdb.d/postgresql_schema.sql", + FileMode: 0755, + }, + }, + } + + pg.container, err = testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + + printContainerLogs(pg.container) + if err != nil { + return err + } + + dsn := pg.GetConnectionString() + db, err := sql.Open("pgx", dsn) + if err != nil { + return fmt.Errorf("failed to open postgres connection: %w", err) + } + + if err := db.Ping(); err != nil { + db.Close() + pg.container.Terminate(ctx) + return fmt.Errorf("failed to ping postgres after connection: %w", err) + } + + // Store the DB connection for reuse + pg.db = db + return nil +} + +func (pg *PostgresContainer) Terminate(ctx context.Context) { + if pg == nil { + return + } + + // Close the DB connection if it exists + if pg.db != nil { + if err := pg.db.Close(); err != nil { + log.Errorf("failed to close postgres db connection: %v", err) + } + } + + err := pg.container.Terminate(ctx) + if err != nil { + log.Errorf("failed to terminate postgres container: %v", err) + } +} + +func (pg *PostgresContainer) GetHostPort() (string, int, error) { + if pg.container == nil { + return "", -1, fmt.Errorf("postgres container is not started: nil") + } + + ctx := context.Background() + host, err := pg.container.Host(ctx) + if err != nil { + return "", -1, fmt.Errorf("failed to fetch host for postgres container: %w", err) + } + + port, err := pg.container.MappedPort(ctx, nat.Port(DEFAULT_PG_PORT)) + if err != nil { + return "", -1, fmt.Errorf("failed to fetch mapped port for postgres container: %w", err) + } + + return host, port.Int(), nil +} + +func (pg *PostgresContainer) GetConfig() ContainerConfig { + return pg.ContainerConfig +} + +func (pg *PostgresContainer) GetConnectionString() string { + config := pg.GetConfig() + host, port, err := pg.GetHostPort() + if err != nil { + utils.ErrExit("failed to get host port for postgres connection string: %v", err) + } + + return 
fmt.Sprintf("postgresql://%s:%s@%s:%d/%s", config.User, config.Password, host, port, config.DBName) +} + +func (pg *PostgresContainer) ExecuteSqls(sqls ...string) { + if pg.db == nil { + utils.ErrExit("db connection not initialized for postgres container") + } + + for _, sqlStmt := range sqls { + _, err := pg.db.Exec(sqlStmt) + if err != nil { + utils.ErrExit("failed to execute sql '%s': %w", sqlStmt, err) + } + } +} diff --git a/yb-voyager/test/containers/test_schemas/mysql_schema.sql b/yb-voyager/test/containers/test_schemas/mysql_schema.sql new file mode 100644 index 0000000000..e72f4bcb17 --- /dev/null +++ b/yb-voyager/test/containers/test_schemas/mysql_schema.sql @@ -0,0 +1,6 @@ +-- TODO: create user as per User creation steps in docs and use that in tests + +-- Grant CREATE, ALTER, DROP privileges globally to 'ybvoyager' +GRANT CREATE, ALTER, DROP ON *.* TO 'ybvoyager'@'%' WITH GRANT OPTION; +-- Apply the changes +FLUSH PRIVILEGES; \ No newline at end of file diff --git a/yb-voyager/test/containers/test_schemas/oracle_schema.sql b/yb-voyager/test/containers/test_schemas/oracle_schema.sql new file mode 100644 index 0000000000..95b3a3a9b4 --- /dev/null +++ b/yb-voyager/test/containers/test_schemas/oracle_schema.sql @@ -0,0 +1,47 @@ +-- TODO: create user as per User creation steps in docs and use that in tests + +-- Used ORACLE_DATABASE=DMS i.e. 
pluggable database to create APP_USER +ALTER SESSION SET CONTAINER = "DMS"; + + +-- creating tables under YBVOYAGER schema, same as APP_USER +CREATE TABLE YBVOYAGER.foo ( + id NUMBER PRIMARY KEY, + name VARCHAR2(255) +); + +CREATE TABLE YBVOYAGER.bar ( + id NUMBER PRIMARY KEY, + name VARCHAR2(255) +); + +CREATE TABLE YBVOYAGER.unique_table ( + id NUMBER PRIMARY KEY, + email VARCHAR2(100), + phone VARCHAR2(100), + address VARCHAR2(255), + CONSTRAINT email_phone_unq UNIQUE (email, phone) +); + +CREATE UNIQUE INDEX YBVOYAGER.unique_address_idx ON YBVOYAGER.unique_table (address); + +CREATE TABLE YBVOYAGER.table1 ( + id NUMBER PRIMARY KEY, + name VARCHAR2(100) +); + +CREATE TABLE YBVOYAGER.table2 ( + id NUMBER PRIMARY KEY, + email VARCHAR2(100) +); + + +CREATE TABLE YBVOYAGER.non_pk1 ( + id NUMBER, + name VARCHAR2(10) +); + +CREATE TABLE YBVOYAGER.non_pk2 ( + id NUMBER, + name VARCHAR2(10) +); \ No newline at end of file diff --git a/yb-voyager/test/containers/test_schemas/postgresql_schema.sql b/yb-voyager/test/containers/test_schemas/postgresql_schema.sql new file mode 100644 index 0000000000..36bda657a5 --- /dev/null +++ b/yb-voyager/test/containers/test_schemas/postgresql_schema.sql @@ -0,0 +1 @@ +-- TODO: create source migration user as per User creation steps in docs and use that in tests diff --git a/yb-voyager/test/containers/test_schemas/yugabytedb_schema.sql b/yb-voyager/test/containers/test_schemas/yugabytedb_schema.sql new file mode 100644 index 0000000000..c36ddc5b93 --- /dev/null +++ b/yb-voyager/test/containers/test_schemas/yugabytedb_schema.sql @@ -0,0 +1 @@ +-- TODO: create user as per User creation steps in docs and use that in tests diff --git a/yb-voyager/test/containers/testcontainers.go b/yb-voyager/test/containers/testcontainers.go new file mode 100644 index 0000000000..c9fce4b15c --- /dev/null +++ b/yb-voyager/test/containers/testcontainers.go @@ -0,0 +1,143 @@ +package testcontainers + +import ( + "context" + "fmt" + "sync" + + 
"github.com/samber/lo" + log "github.com/sirupsen/logrus" +) + +// containerRegistry to ensure one container per database(dbtype+version) [Singleton Pattern] +// Limitation - go test spawns different process for running tests of each package, hence the containers won't be shared across packages. +var ( + containerRegistry = make(map[string]TestContainer) + registryMutex sync.Mutex +) + +type TestContainer interface { + Start(ctx context.Context) error + Terminate(ctx context.Context) + GetHostPort() (string, int, error) + GetConfig() ContainerConfig + GetConnectionString() string + /* + TODOs + // Function to run sql script for a specific test case + SetupSqlScript(scriptName string, dbName string) error + + // Add Capability to run multiple versions of a dbtype parallely + */ + ExecuteSqls(sqls ...string) +} + +type ContainerConfig struct { + DBVersion string + User string + Password string + DBName string + Schema string +} + +func NewTestContainer(dbType string, containerConfig *ContainerConfig) TestContainer { + registryMutex.Lock() + defer registryMutex.Unlock() + + // initialise containerConfig struct if nothing is provided + if containerConfig == nil { + containerConfig = &ContainerConfig{} + } + setContainerConfigDefaultsIfNotProvided(dbType, containerConfig) + + // check if container is already created after fetching default configs + containerName := fmt.Sprintf("%s-%s", dbType, containerConfig.DBVersion) + if container, exists := containerRegistry[containerName]; exists { + log.Infof("container '%s' already exists in the registry", containerName) + return container + } + + var testContainer TestContainer + switch dbType { + case POSTGRESQL: + testContainer = &PostgresContainer{ + ContainerConfig: *containerConfig, + } + case YUGABYTEDB: + testContainer = &YugabyteDBContainer{ + ContainerConfig: *containerConfig, + } + case ORACLE: + testContainer = &OracleContainer{ + ContainerConfig: *containerConfig, + } + case MYSQL: + testContainer = &MysqlContainer{ 
+ ContainerConfig: *containerConfig, + } + default: + panic(fmt.Sprintf("unsupported db type '%q' for creating test container\n", dbType)) + } + + containerRegistry[containerName] = testContainer + return testContainer +} + +/* +Challenges in golang for running this a teardown step +1. In golang when you execute go test in the top level folder it executes all the tests one by one. +2. Where each defined package, can have its TestMain() which can control the setup and teardown steps for that package +3. There is no way to run these before/after the tests of first/last package in codebase + +Potential solution: Implement a counter(total=number_of_package) based logic to execute teardown(i.e. TerminateAllContainers() in our case) +Figure out the best solution. + +For now we can rely on TestContainer ryuk(the container repear), which terminates all the containers after the process exits. +But the test framework should have capability of terminating all containers at the end. +*/ +func TerminateAllContainers() { + registryMutex.Lock() + defer registryMutex.Unlock() + + ctx := context.Background() + for name, container := range containerRegistry { + log.Infof("terminating the container '%s'", name) + container.Terminate(ctx) + } +} + +func setContainerConfigDefaultsIfNotProvided(dbType string, config *ContainerConfig) { + // TODO: discuss and decide the default DBVersion values for each dbtype + + switch dbType { + case POSTGRESQL: + config.User = lo.Ternary(config.User == "", "ybvoyager", config.User) + config.Password = lo.Ternary(config.Password == "", "passsword", config.Password) + config.DBVersion = lo.Ternary(config.DBVersion == "", "11", config.DBVersion) + config.Schema = lo.Ternary(config.Schema == "", "public", config.Schema) + config.DBName = lo.Ternary(config.DBName == "", "postgres", config.DBName) + + case YUGABYTEDB: + config.User = lo.Ternary(config.User == "", "yugabyte", config.User) // ybdb docker doesn't create specified user + config.Password = 
lo.Ternary(config.Password == "", "passsword", config.Password) + config.DBVersion = lo.Ternary(config.DBVersion == "", "2.20.7.1-b10", config.DBVersion) + config.Schema = lo.Ternary(config.Schema == "", "public", config.Schema) + config.DBName = lo.Ternary(config.DBName == "", "yugabyte", config.DBName) + + case ORACLE: + config.User = lo.Ternary(config.User == "", "ybvoyager", config.User) + config.Password = lo.Ternary(config.Password == "", "passsword", config.Password) + config.DBVersion = lo.Ternary(config.DBVersion == "", "21", config.DBVersion) + config.Schema = lo.Ternary(config.Schema == "", "YBVOYAGER", config.Schema) + config.DBName = lo.Ternary(config.DBName == "", "DMS", config.DBName) + + case MYSQL: + config.User = lo.Ternary(config.User == "", "ybvoyager", config.User) + config.Password = lo.Ternary(config.Password == "", "passsword", config.Password) + config.DBVersion = lo.Ternary(config.DBVersion == "", "8.4", config.DBVersion) + config.DBName = lo.Ternary(config.DBName == "", "dms", config.DBName) + + default: + panic(fmt.Sprintf("unsupported db type '%q' for creating test container\n", dbType)) + } +} diff --git a/yb-voyager/test/containers/yugabytedb_container.go b/yb-voyager/test/containers/yugabytedb_container.go new file mode 100644 index 0000000000..822a7f6945 --- /dev/null +++ b/yb-voyager/test/containers/yugabytedb_container.go @@ -0,0 +1,129 @@ +package testcontainers + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/docker/go-connections/nat" + "github.com/jackc/pgx/v5" + log "github.com/sirupsen/logrus" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils" +) + +type YugabyteDBContainer struct { + ContainerConfig + container testcontainers.Container +} + +func (yb *YugabyteDBContainer) Start(ctx context.Context) (err error) { + if yb.container != nil { + utils.PrintAndLog("YugabyteDB-%s container already running", 
yb.DBVersion) + return nil + } + + // since these Start() can be called from anywhere so need a way to ensure that correct files(without needing abs path) are picked from project directories + tmpFile, err := os.CreateTemp(os.TempDir(), "yugabytedb_schema.sql") + if err != nil { + return fmt.Errorf("failed to create temp schema file: %w", err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write(yugabytedbInitSchemaFile); err != nil { + return fmt.Errorf("failed to write to temp schema file: %w", err) + } + + // this will create a 1 Node RF-1 cluster + req := testcontainers.ContainerRequest{ + Image: fmt.Sprintf("yugabytedb/yugabyte:%s", yb.DBVersion), + ExposedPorts: []string{"5433/tcp", "15433/tcp", "7000/tcp", "9000/tcp", "9042/tcp"}, + Cmd: []string{ + "bin/yugabyted", + "start", + "--daemon=false", + "--ui=false", + "--initial_scripts_dir=/home/yugabyte/initial-scripts", + }, + WaitingFor: wait.ForAll( + wait.ForListeningPort("5433/tcp").WithStartupTimeout(2*time.Minute).WithPollInterval(5*time.Second), + wait.ForLog("Data placement constraint successfully verified").WithStartupTimeout(3*time.Minute).WithPollInterval(1*time.Second), + ), + Files: []testcontainers.ContainerFile{ + { + HostFilePath: tmpFile.Name(), + ContainerFilePath: "/home/yugabyte/initial-scripts/yugabytedb_schema.sql", + FileMode: 0755, + }, + }, + } + + yb.container, err = testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + printContainerLogs(yb.container) + return err +} + +func (yb *YugabyteDBContainer) Terminate(ctx context.Context) { + if yb == nil { + return + } + + err := yb.container.Terminate(ctx) + if err != nil { + log.Errorf("failed to terminate yugabytedb container: %v", err) + } +} + +func (yb *YugabyteDBContainer) GetHostPort() (string, int, error) { + if yb.container == nil { + return "", -1, fmt.Errorf("yugabytedb container is not started: nil") + } + + ctx := context.Background() + host, err := 
yb.container.Host(ctx) + if err != nil { + return "", -1, fmt.Errorf("failed to fetch host for yugabytedb container: %w", err) + } + + port, err := yb.container.MappedPort(ctx, nat.Port(DEFAULT_YB_PORT)) + if err != nil { + return "", -1, fmt.Errorf("failed to fetch mapped port for yugabytedb container: %w", err) + } + + return host, port.Int(), nil +} + +func (yb *YugabyteDBContainer) GetConfig() ContainerConfig { + return yb.ContainerConfig +} + +func (yb *YugabyteDBContainer) GetConnectionString() string { + config := yb.GetConfig() + host, port, err := yb.GetHostPort() + if err != nil { + utils.ErrExit("failed to get host port for yugabytedb connection string: %v", err) + } + + return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s", config.User, config.Password, host, port, config.DBName) +} + +func (yb *YugabyteDBContainer) ExecuteSqls(sqls ...string) { + connStr := yb.GetConnectionString() + conn, err := pgx.Connect(context.Background(), connStr) + if err != nil { + utils.ErrExit("failed to connect postgres for executing sqls: %w", err) + } + defer conn.Close(context.Background()) + + for _, sql := range sqls { + _, err := conn.Exec(context.Background(), sql) + if err != nil { + utils.ErrExit("failed to execute sql '%s': %w", sql, err) + } + } +} diff --git a/yb-voyager/test/utils/testutils.go b/yb-voyager/test/utils/testutils.go new file mode 100644 index 0000000000..942e00fd67 --- /dev/null +++ b/yb-voyager/test/utils/testutils.go @@ -0,0 +1,408 @@ +package testutils + +import ( + "database/sql" + "fmt" + "os" + "reflect" + "regexp" + "sort" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/yugabyte/yb-voyager/yb-voyager/src/utils/sqlname" +) + +type ColumnPropertiesSqlite struct { + Type string // Data type (e.g., INTEGER, TEXT) + PrimaryKey int // Whether it's a primary key values can be 0,1,2,3. If 1 then it is primary key. 2,3 etc. 
are used for composite primary key + NotNull bool // Whether the column has a NOT NULL constraint + Default sql.NullString // Default value, if any (nil means no default) +}

// ColumnPropertiesPG describes a PostgreSQL column's expected metadata as
// reported by information_schema.columns (plus primary-key membership).
type ColumnPropertiesPG struct {
	Type       string
	IsPrimary  bool
	IsNullable string
	Default    sql.NullString
}

// CompareStructs compares two struct types field-by-field (name, type, tag)
// and reports any mismatches, including missing or extra fields. Used to
// detect breaking changes in serialized/persisted struct layouts.
func CompareStructs(t *testing.T, actual, expected reflect.Type, structName string) {
	assert.Equal(t, reflect.Struct, actual.Kind(), "%s: Actual type must be a struct. There is some breaking change!", structName)
	assert.Equal(t, reflect.Struct, expected.Kind(), "%s: Expected type must be a struct. There is some breaking change!", structName)

	// Walk up to the longer of the two field lists so both missing and
	// extra fields are reported.
	for i := 0; i < max(actual.NumField(), expected.NumField()); i++ {
		var actualField, expectedField reflect.StructField
		var actualExists, expectedExists bool

		if i < actual.NumField() {
			actualField = actual.Field(i)
			actualExists = true
		}
		if i < expected.NumField() {
			expectedField = expected.Field(i)
			expectedExists = true
		}

		// Assert field names, types and tags match positionally.
		if actualExists && expectedExists {
			assert.Equal(t, expectedField.Name, actualField.Name, "%s: Field name mismatch at position %d. There is some breaking change!", structName, i)
			assert.Equal(t, expectedField.Type.String(), actualField.Type.String(), "%s: Field type mismatch for field %s. There is some breaking change!", structName, expectedField.Name)
			assert.Equal(t, expectedField.Tag, actualField.Tag, "%s: Field tag mismatch for field %s. There is some breaking change!", structName, expectedField.Name)
		}

		// Report missing or extra fields
		if !actualExists && expectedExists {
			t.Errorf("%s: Missing field %s of type %s. There is some breaking change!", structName, expectedField.Name, expectedField.Type)
		}
		if actualExists && !expectedExists {
			t.Errorf("%s: Unexpected field %s of type %s. There is some breaking change!", structName, actualField.Name, actualField.Type)
		}
	}
}

// CompareJson compares the contents of outputFilePath against expectedJSON
// (exact text comparison, including formatting) and reports a diff on
// mismatch. exportDir is currently unused; kept for interface stability.
func CompareJson(t *testing.T, outputFilePath string, expectedJSON string, exportDir string) {
	// Read the output JSON file
	outputBytes, err := os.ReadFile(outputFilePath)
	if err != nil {
		t.Fatalf("Failed to read output JSON file: %v", err)
	}

	// Compare the output JSON with the expected JSON
	if diff := cmp.Diff(expectedJSON, string(outputBytes)); diff != "" {
		t.Errorf("JSON file mismatch (-expected +actual):\n%s", diff)
	}

	// Can be used if we don't want to compare pretty printed JSON
	// assert.JSONEqf(t, expectedJSON, string(outputBytes), "JSON file mismatch. There is some breaking change!")
}

// CheckTableStructureSqlite verifies that tableName's columns (via PRAGMA
// table_info) exactly match expectedColumns: type, primary-key ordinal,
// NOT NULL flag and default value, with no extra columns.
func CheckTableStructureSqlite(db *sql.DB, tableName string, expectedColumns map[string]ColumnPropertiesSqlite) error {
	// PRAGMA table_info columns: cid, name, type, notnull, dflt_value, pk
	rows, err := db.Query(fmt.Sprintf("PRAGMA table_info(%s);", tableName))
	if err != nil {
		return fmt.Errorf("failed to get table info for %s: %w", tableName, err)
	}
	defer rows.Close()

	// Collect the actual column set first, then diff both directions.
	actualColumns := make(map[string]ColumnPropertiesSqlite)
	for rows.Next() {
		var cid int
		var name string
		var cp ColumnPropertiesSqlite
		if err := rows.Scan(&cid, &name, &cp.Type, &cp.NotNull, &cp.Default, &cp.PrimaryKey); err != nil {
			return err
		}
		actualColumns[name] = ColumnPropertiesSqlite{
			Type:       cp.Type,
			PrimaryKey: cp.PrimaryKey,
			NotNull:    cp.NotNull,
			Default:    cp.Default,
		}
	}

	// Compare actual columns with expected columns
	for colName, expectedProps := range expectedColumns {
		actualProps, exists := actualColumns[colName]
		if !exists {
			return fmt.Errorf("table %s missing expected column: %s. There is some breaking change!", tableName, colName)
		}

		// Check type
		if actualProps.Type != expectedProps.Type {
			return fmt.Errorf("table %s column %s: expected type %s, got %s. There is some breaking change!", tableName, colName, expectedProps.Type, actualProps.Type)
		}

		// Check if it's part of the primary key (ordinal must match too)
		if actualProps.PrimaryKey != expectedProps.PrimaryKey {
			return fmt.Errorf("table %s column %s: expected primary key to be %v, got %v. There is some breaking change!", tableName, colName, expectedProps.PrimaryKey, actualProps.PrimaryKey)
		}

		// Check NOT NULL constraint
		if actualProps.NotNull != expectedProps.NotNull {
			return fmt.Errorf("table %s column %s: expected NOT NULL to be %v, got %v. There is some breaking change!", tableName, colName, expectedProps.NotNull, actualProps.NotNull)
		}

		// Check default value: mismatch when validity differs, or both are
		// valid with different strings.
		if (expectedProps.Default.Valid && !actualProps.Default.Valid) || (!expectedProps.Default.Valid && actualProps.Default.Valid) || (expectedProps.Default.Valid && actualProps.Default.Valid && expectedProps.Default.String != actualProps.Default.String) {
			return fmt.Errorf("table %s column %s: expected default value %v, got %v. There is some breaking change!", tableName, colName, expectedProps.Default, actualProps.Default)
		}
	}

	// Check for any additional unexpected columns
	for colName := range actualColumns {
		if _, exists := expectedColumns[colName]; !exists {
			return fmt.Errorf("table %s has unexpected additional column: %s. There is some breaking change!", tableName, colName)
		}
	}

	return nil
}

// CheckTableStructurePG verifies schema.table's columns against
// expectedColumns (type, nullability, default, no extras) and then checks
// primary-key membership.
func CheckTableStructurePG(t *testing.T, db *sql.DB, schema, table string, expectedColumns map[string]ColumnPropertiesPG) {
	// FIX: filter on table_schema as well; filtering only by table_name
	// picked up columns of identically-named tables in other schemas.
	queryColumns := `
		SELECT column_name, data_type, is_nullable, column_default
		FROM information_schema.columns
		WHERE table_schema = $1
		  AND table_name = $2;`

	rows, err := db.Query(queryColumns, schema, table)
	if err != nil {
		t.Fatalf("Failed to query columns for table %s.%s: %v", schema, table, err)
	}
	defer rows.Close()

	actualColumns := make(map[string]ColumnPropertiesPG)
	for rows.Next() {
		var colName string
		var col ColumnPropertiesPG
		if err := rows.Scan(&colName, &col.Type, &col.IsNullable, &col.Default); err != nil {
			t.Fatalf("Failed to scan column metadata: %v", err)
		}
		actualColumns[colName] = col
	}

	// Compare columns
	for colName, expectedProps := range expectedColumns {
		actual, found := actualColumns[colName]
		if !found {
			t.Errorf("Missing expected column in table %s.%s: %s.\nThere is some breaking change!", schema, table, colName)
			continue
		}
		if actual.Type != expectedProps.Type || actual.IsNullable != expectedProps.IsNullable || actual.Default != expectedProps.Default {
			t.Errorf("Column mismatch in table %s.%s: \nexpected %+v, \ngot %+v.\nThere is some breaking change!", schema, table, expectedProps, actual)
		}
	}

	// Check for extra columns (map lookup instead of the old inner loop).
	for actualName := range actualColumns {
		if _, expected := expectedColumns[actualName]; !expected {
			t.Errorf("Unexpected column in table %s.%s: %s.\nThere is some breaking change!", schema, table, actualName)
		}
	}

	// Check primary keys
	checkPrimaryKeyOfTablePG(t, db, schema, table, expectedColumns)
}

// checkPrimaryKeyOfTablePG verifies that the set of primary-key columns of
// schema.table matches the columns marked IsPrimary in expectedColumns.
func checkPrimaryKeyOfTablePG(t *testing.T, db *sql.DB, schema, table string, expectedColumns map[string]ColumnPropertiesPG) {
	// NOTE(review): conrelid::regclass::text is compared against
	// "schema.table"; regclass omits schemas that are on the search_path,
	// so this assumes the tested schema is not on it — confirm with callers.
	queryPrimaryKeys := `
		SELECT conrelid::regclass AS table_name,
		       conname AS primary_key,
		       pg_get_constraintdef(oid)
		FROM pg_constraint
		WHERE contype = 'p' -- 'p' indicates primary key
		  AND conrelid::regclass::text = $1
		ORDER BY conrelid::regclass::text, contype DESC;`

	rows, err := db.Query(queryPrimaryKeys, fmt.Sprintf("%s.%s", schema, table))
	if err != nil {
		t.Fatalf("Failed to query primary keys for table %s.%s: %v", schema, table, err)
	}
	defer rows.Close()

	// Map to store primary key columns (not just the constraint name)
	// Output is like:
	// table_name | primary_key | primary_key_definition
	// ybvoyager_metadata.ybvoyager_import_data_batches_metainfo_v3 | ybvoyager_import_data_batches_metainfo_v3_pkey | PRIMARY KEY (migration_uuid, data_file_name, batch_number, schema_name, table_name)
	primaryKeyColumns := map[string]bool{}
	for rows.Next() {
		var tableName, pk, constraintDef string
		if err := rows.Scan(&tableName, &pk, &constraintDef); err != nil {
			t.Fatalf("Failed to scan primary key: %v", err)
		}

		// Parse the columns from the constraint definition (e.g., "PRIMARY KEY (col1, col2, ...)")
		for _, col := range parsePrimaryKeyColumnsPG(constraintDef) {
			primaryKeyColumns[col] = true
		}
	}

	// Every expected primary-key column must actually be part of the key.
	for expectedName, expectedParams := range expectedColumns {
		if expectedParams.IsPrimary {
			if _, found := primaryKeyColumns[expectedName]; !found {
				t.Errorf("Missing expected primary key column in table %s.%s: %s.\nThere is some breaking change!", schema, table, expectedName)
			}
		}
	}

	// And no actual primary-key column may be unexpected.
	for col := range primaryKeyColumns {
		expectedParams, found := expectedColumns[col]
		if !found || !expectedParams.IsPrimary {
			t.Errorf("Unexpected primary key column in table %s.%s: %s.\nThere is some breaking change!", schema, table, col)
		}
	}
}

// parsePrimaryKeyColumnsPG extracts the column names from a constraint
// definition of the form "PRIMARY KEY (col1, col2, ...)"; it returns nil
// when the definition does not match.
// NOTE(review): a quoted identifier containing a comma would be split
// incorrectly — acceptable for test fixtures, but worth confirming.
func parsePrimaryKeyColumnsPG(constraintDef string) []string {
	// Define the regex pattern
	re := regexp.MustCompile(`PRIMARY KEY\s*\((.*?)\)`)

	// Extract the column list inside "PRIMARY KEY(...)"
	matches := re.FindStringSubmatch(constraintDef)
	if len(matches) < 2 {
		return nil // Return nil if no match is found
	}

	// Split by commas to get individual column names
	columns := strings.Split(matches[1], ",")
	for i := range columns {
		columns[i] = strings.TrimSpace(columns[i]) // Remove extra spaces around column names
	}

	return columns
}

+// Helper function to check table existence and no extra tables +func CheckTableExistenceSqlite(t *testing.T, db *sql.DB, expectedTables map[string]map[string]ColumnPropertiesSqlite) error { + // Query to get table names + rows, err := db.Query("SELECT name FROM sqlite_master WHERE type='table';") + if err != nil { + return fmt.Errorf("failed to get table names: %w", err) + } + defer rows.Close() + + // Check if tables match expected ones + actualTables := make(map[string]struct{}) + for rows.Next() { + var tableName string + if err := rows.Scan(&tableName); err != nil { + return err + } + actualTables[tableName] = struct{}{} + } + + // Compare actual tables with expected tables + for tableName := range expectedTables { + if _, exists := actualTables[tableName]; !exists { + return fmt.Errorf("expected table %s not found. There is some breaking change!", tableName) + } else { + t.Logf("Found table: %s", tableName) + } + } + + // Check for any additional unexpected tables + for tableName := range actualTables { + if _, exists := expectedTables[tableName]; !exists { + return fmt.Errorf("unexpected additional table: %s. 
There is some breaking change!", tableName) + } + } + + return nil +} + +// validateSchema validates the schema, tables, and columns +func CheckTableExistencePG(t *testing.T, db *sql.DB, schema string, expectedTables map[string]map[string]ColumnPropertiesPG) { + // Check all tables in the schema + queryTables := `SELECT table_schema || '.' || table_name AS qualified_table_name + FROM information_schema.tables + WHERE table_schema = $1;` + rows, err := db.Query(queryTables, schema) + if err != nil { + t.Fatalf("Failed to query tables in schema %s: %v", schema, err) + } + defer rows.Close() + + actualTables := make(map[string]bool) + for rows.Next() { + var tableName string + if err := rows.Scan(&tableName); err != nil { + t.Fatalf("Failed to scan table name: %v", err) + } + actualTables[tableName] = true + } + + // Compare tables + for expectedTable := range expectedTables { + if !actualTables[expectedTable] { + t.Errorf("Missing expected table: %s", expectedTable) + } + } + + // Check for extra tables + for actualTable := range actualTables { + if _, found := expectedTables[actualTable]; !found { + t.Errorf("Unexpected table found: %s", actualTable) + } + } +} + +// === assertion helper functions +func AssertEqualStringSlices(t *testing.T, expected, actual []string) { + t.Helper() + if len(expected) != len(actual) { + t.Errorf("Mismatch in slice length. 
Expected: %v, Actual: %v", expected, actual) + } + + sort.Strings(expected) + sort.Strings(actual) + assert.Equal(t, expected, actual) +} + +func AssertEqualSourceNameSlices(t *testing.T, expected, actual []*sqlname.SourceName) { + SortSourceNames(expected) + SortSourceNames(actual) + assert.Equal(t, expected, actual) +} + +func SortSourceNames(tables []*sqlname.SourceName) { + sort.Slice(tables, func(i, j int) bool { + return tables[i].Qualified.MinQuoted < tables[j].Qualified.MinQuoted + }) +} + +func AssertEqualNameTuplesSlice(t *testing.T, expected, actual []sqlname.NameTuple) { + sortNameTuples(expected) + sortNameTuples(actual) + assert.Equal(t, expected, actual) +} + +func sortNameTuples(tables []sqlname.NameTuple) { + sort.Slice(tables, func(i, j int) bool { + return tables[i].ForOutput() < tables[j].ForOutput() + }) +} + +// waitForDBConnection waits until the database is ready for connections. +func WaitForDBToBeReady(db *sql.DB) error { + for i := 0; i < 12; i++ { + if err := db.Ping(); err == nil { + return nil + } + time.Sleep(5 * time.Second) + } + return fmt.Errorf("database did not become ready in time") +} + +func FatalIfError(t *testing.T, err error) { + if err != nil { + t.Fatalf("error: %v", err) + } +}
    ObjectTotal CountValid CountInvalid CountObject NamesDetails
    ObjectTotal ObjectsObjects Without IssuesObjects With IssuesObject NamesDetails