From dea3c21ae263c4f1d7340b5d2cc78fc190faa98d Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Mon, 3 Sep 2018 12:16:21 +0200 Subject: [PATCH 01/13] added get_sequences --- .../postgresql_backend/introspection.py | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/tenant_schemas/postgresql_backend/introspection.py b/tenant_schemas/postgresql_backend/introspection.py index 7ff2cca5..a45174e5 100644 --- a/tenant_schemas/postgresql_backend/introspection.py +++ b/tenant_schemas/postgresql_backend/introspection.py @@ -5,6 +5,7 @@ from django.db.backends.base.introspection import ( BaseDatabaseIntrospection, FieldInfo, TableInfo, ) + try: # Django >= 1.11 from django.db.models.indexes import Index @@ -79,7 +80,8 @@ class DatabaseSchemaIntrospection(BaseDatabaseIntrospection): ON ccu.constraint_catalog = kcu.constraint_catalog AND ccu.constraint_schema = kcu.constraint_schema AND ccu.constraint_name = kcu.constraint_name - LEFT JOIN information_schema.table_constraints tc + LEFT JOIN information_sche + ma.table_constraints tc ON ccu.constraint_catalog = tc.constraint_catalog AND ccu.constraint_schema = tc.constraint_schema AND ccu.constraint_name = tc.constraint_name @@ -174,6 +176,21 @@ class DatabaseSchemaIntrospection(BaseDatabaseIntrospection): GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions; """ + _get_sequences_query = """ + SELECT s.relname as sequence_name, col.attname + FROM pg_class s + JOIN pg_namespace sn ON sn.oid = s.relnamespace + JOIN pg_depend d ON d.refobjid = s.oid AND d.refclassid='pg_class'::regclass + JOIN pg_attrdef ad ON ad.oid = d.objid AND d.classid = 'pg_attrdef'::regclass + JOIN pg_attribute col ON col.attrelid = ad.adrelid AND col.attnum = ad.adnum + JOIN pg_class tbl ON tbl.oid = ad.adrelid + JOIN pg_namespace n ON n.oid = tbl.relnamespace + WHERE s.relkind = 'S' + AND d.deptype in ('a', 'n') + AND n.nspname = %(schema)s + AND tbl.relname = %(table)s + """ + def get_field_type(self, data_type, description): field_type = super(DatabaseSchemaIntrospection, self).get_field_type(data_type, description) if description.default and 'nextval' in description.default: @@ -315,3 +332,14 @@ def get_constraints(self, cursor, table_name): "options": options, } return constraints + + def get_sequences(self, cursor, table_name, table_fields=()): + sequences = [] + cursor.execute(self._get_sequences_query, { + 'schema': self.connection.schema_name, + 'table': table_name, + }) + + for row in cursor.fetchall(): + sequences.append({'name': row[0], 'table': table_name, 'column': row[1]}) + return sequences From d82839cf5e40a6b2952802488a953f71c99f6530 Mon Sep 17 00:00:00 2001 From: jeroenbrouwer Date: Mon, 3 Sep 2018 12:26:17 +0200 Subject: [PATCH 02/13] Update introspection.py --- tenant_schemas/postgresql_backend/introspection.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tenant_schemas/postgresql_backend/introspection.py b/tenant_schemas/postgresql_backend/introspection.py index a45174e5..3316b8e3 100644 --- a/tenant_schemas/postgresql_backend/introspection.py +++ b/tenant_schemas/postgresql_backend/introspection.py @@ -80,8 +80,7 @@ class DatabaseSchemaIntrospection(BaseDatabaseIntrospection): ON ccu.constraint_catalog = kcu.constraint_catalog AND ccu.constraint_schema = kcu.constraint_schema AND ccu.constraint_name = kcu.constraint_name - LEFT JOIN information_sche - ma.table_constraints tc + LEFT JOIN information_schema.table_constraints tc ON ccu.constraint_catalog = tc.constraint_catalog AND 
ccu.constraint_schema = tc.constraint_schema AND ccu.constraint_name = tc.constraint_name From 18f187c147619dc6ca3c19179cf4a7c64b2dfe30 Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Fri, 30 Aug 2019 11:07:00 +0200 Subject: [PATCH 03/13] Update setup.py to work with psycopg2-binary --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 6b8613af..b01b112e 100755 --- a/setup.py +++ b/setup.py @@ -43,7 +43,7 @@ ], install_requires=[ 'Django >= 1.8.0', - 'psycopg2', + 'psycopg2-binary', ], zip_safe=False, ) From 5e139692190153ed3ecfe1d307a139c5ee64a35d Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Sat, 26 Sep 2020 13:55:19 +0200 Subject: [PATCH 04/13] bug fix for migrate_schemas.py for django >= 3.1 --- tenant_schemas/management/commands/migrate_schemas.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tenant_schemas/management/commands/migrate_schemas.py b/tenant_schemas/management/commands/migrate_schemas.py index 3dc9d87b..93ac11c1 100644 --- a/tenant_schemas/management/commands/migrate_schemas.py +++ b/tenant_schemas/management/commands/migrate_schemas.py @@ -1,5 +1,6 @@ from django.core.management.commands.migrate import Command as MigrateCommand from django.db.migrations.exceptions import MigrationSchemaMissing +import django from tenant_schemas.management.commands import SyncCommon from tenant_schemas.migration_executors import get_executor from tenant_schemas.utils import ( @@ -10,6 +11,9 @@ class Command(SyncCommon): + if django.VERSION >= (3, 1): + # https://github.com/bernardopires/django-tenant-schemas/issues/648#issuecomment-671115840 + requires_system_checks = [] help = ( "Updates database schema. Manages both apps with migrations and those without." ) @@ -41,7 +45,7 @@ def handle(self, *args, **options): else: tenants = ( get_tenant_model() - .objects.exclude(schema_name=get_public_schema_name()) - .values_list("schema_name", flat=True) + .objects.exclude(schema_name=get_public_schema_name()) + .values_list("schema_name", flat=True) ) executor.run_migrations(tenants=tenants) From 7ae346ee3f30d696ac87cdf21284afac37de3945 Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Fri, 30 Oct 2020 07:25:12 +0100 Subject: [PATCH 05/13] cloning --- tenant_schemas/clone.py | 837 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 837 insertions(+) create mode 100644 tenant_schemas/clone.py diff --git a/tenant_schemas/clone.py b/tenant_schemas/clone.py new file mode 100644 index 00000000..2b6a75ec --- /dev/null +++ b/tenant_schemas/clone.py @@ -0,0 +1,837 @@ +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db import connection, transaction +from django.db.utils import ProgrammingError + +from tenant_schemas.utils import schema_exists +from tenant_schemas.postgresql_backend.base import _check_schema_name + +# NOTE +# SQL is coming from https://github.com/denishpatel/pg-clone-schema/ rev ea3fbd7 + + +CLONE_SCHEMA_FUNCTION = """ +-- Function: clone_schema(text, text, boolean, boolean) + +-- DROP FUNCTION clone_schema(text, text, boolean, boolean); + +CREATE OR REPLACE FUNCTION public.clone_schema( + source_schema text, + dest_schema text, + include_recs boolean, + ddl_only boolean) + RETURNS void AS +$BODY$ + +-- This function will clone all sequences, tables, data, views & functions from any existing schema to a new one +-- SAMPLE CALL: +-- SELECT clone_schema('public', 'new_schema', True, False); + +DECLARE + src_oid oid; + tbl_oid oid; + func_oid oid; 
+ object text; + buffer text; + buffer2 text; + buffer3 text; + srctbl text; + default_ text; + column_ text; + qry text; + ix_old_name text; + ix_new_name text; + aname text; + relpersist text; + relispart text; + relknd text; + adef text; + dest_qry text; + v_def text; + part_range text; + src_path_old text; + aclstr text; + grantor text; + grantee text; + privs text; + seqval bigint; + sq_last_value bigint; + sq_max_value bigint; + sq_start_value bigint; + sq_increment_by bigint; + sq_min_value bigint; + sq_cache_value bigint; + sq_is_called boolean; + sq_is_cycled boolean; + sq_data_type text; + sq_cycled char(10); + arec RECORD; + cnt integer; + cnt2 integer; + pos integer; + action text := 'N/A'; + v_ret text; + v_diag1 text; + v_diag2 text; + v_diag3 text; + v_diag4 text; + v_diag5 text; + v_diag6 text; + +BEGIN + + -- Make sure NOTICE are shown + set client_min_messages = 'notice'; + + -- Check that source_schema exists + SELECT oid INTO src_oid + FROM pg_namespace + WHERE nspname = quote_ident(source_schema); + IF NOT FOUND + THEN + RAISE NOTICE 'source schema % does not exist!', source_schema; + RETURN ; + END IF; + + -- Check that dest_schema does not yet exist + PERFORM nspname + FROM pg_namespace + WHERE nspname = quote_ident(dest_schema); + IF FOUND + THEN + RAISE NOTICE 'dest schema % already exists!', dest_schema; + RETURN ; + END IF; + IF ddl_only and include_recs THEN + RAISE WARNING 'You cannot specify to clone data and generate ddl at the same time.'; + RETURN ; + END IF; + + -- Set the search_path to source schema. Before exiting set it back to what it was before. + SELECT setting INTO src_path_old FROM pg_settings WHERE name='search_path'; + EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + -- RAISE NOTICE 'Using source search_path=%', buffer; + + -- Validate required types exist. If not, create them. 
+ select a.objtypecnt, b.permtypecnt INTO cnt, cnt2 FROM + (SELECT count(*) as objtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) + AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) + AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'obj_type') a, + (SELECT count(*) as permtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) + AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) + AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'perm_type') b; + IF cnt = 0 THEN + CREATE TYPE obj_type AS ENUM ('TABLE','VIEW','COLUMN','SEQUENCE','FUNCTION','SCHEMA','DATABASE'); + END IF; + IF cnt2 = 0 THEN + CREATE TYPE perm_type AS ENUM ('SELECT','INSERT','UPDATE','DELETE','TRUNCATE','REFERENCES','TRIGGER','USAGE','CREATE','EXECUTE','CONNECT','TEMPORARY'); + END IF; + + IF ddl_only THEN + RAISE NOTICE 'Only generating DDL, not actually creating anything...'; + END IF; + + IF ddl_only THEN + RAISE NOTICE '%', 'CREATE SCHEMA ' || quote_ident(dest_schema); + ELSE + EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) ; + END IF; + + -- MV: Create Collations + action := 'Collations'; + cnt := 0; + FOR arec IN + SELECT n.nspname as schemaname, a.rolname as ownername , c.collname, c.collprovider, c.collcollate as locale, + 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ', locale = ''' || c.collcollate || ''');' as COLL_DDL + FROM pg_collation c JOIN pg_namespace n ON (c.collnamespace = n.oid) JOIN pg_roles a ON (c.collowner = a.oid) WHERE n.nspname = quote_ident(source_schema) order by c.collname + LOOP + BEGIN + cnt := cnt + 1; + IF ddl_only THEN + RAISE INFO '%', arec.coll_ddl; + ELSE + EXECUTE arec.coll_ddl; + END IF; + END; + END LOOP; + RAISE NOTICE ' COLLATIONS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV: Create Domains + action := 'Domains'; + cnt := 0; + FOR arec IN + SELECT n.nspname as "Schema", t.typname as "Name", pg_catalog.format_type(t.typbasetype, t.typtypmod) as "Type", + (SELECT c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt WHERE c.oid = t.typcollation AND + bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation) as "Collation", + CASE WHEN t.typnotnull THEN 'not null' END as "Nullable", t.typdefault as "Default", + pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') as "Check", + 'CREATE DOMAIN ' || quote_ident(dest_schema) || '.' 
|| t.typname || ' AS ' || pg_catalog.format_type(t.typbasetype, t.typtypmod) || + CASE WHEN t.typnotnull IS NOT NULL THEN ' NOT NULL ' ELSE ' ' END || CASE WHEN t.typdefault IS NOT NULL THEN 'DEFAULT ' || t.typdefault || ' ' ELSE ' ' END || + pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') || ';' AS DOM_DDL + FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) AND pg_catalog.pg_type_is_visible(t.oid) ORDER BY 1, 2 + LOOP + BEGIN + cnt := cnt + 1; + IF ddl_only THEN + RAISE INFO '%', arec.dom_ddl; + ELSE + EXECUTE arec.dom_ddl; + END IF; + END; + END LOOP; + RAISE NOTICE ' DOMAINS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV: Create types + action := 'Types'; + cnt := 0; + FOR arec IN + SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, CASE WHEN t.typcategory='C' THEN + 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS (' || array_to_string(array_agg(a.attname || ' ' || pg_catalog.format_type(a.atttypid, a.atttypmod) ORDER BY c.relname, a.attnum),', ') || ');' + WHEN t.typcategory='E' THEN + 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS ENUM (' || REPLACE(quote_literal(array_to_string(array_agg(e.enumlabel ORDER BY e.enumsortorder),',')), ',', ''',''') || ');' + ELSE '' END AS type_ddl FROM pg_type t JOIN pg_namespace n ON (n.oid = t.typnamespace) + LEFT JOIN pg_enum e ON (t.oid = e.enumtypid) + LEFT JOIN pg_class c ON (c.reltype = t.oid) LEFT JOIN pg_attribute a ON (a.attrelid = c.oid) + WHERE n.nspname = quote_ident(source_schema) and (c.relkind IS NULL or c.relkind = 'c') and t.typcategory in ('C', 'E') group by 1,2,3,4 order by n.nspname, t.typcategory, t.typname + LOOP + BEGIN + cnt := cnt + 1; + -- Keep composite and enum types in separate branches for fine tuning later if needed. + IF arec.typcategory = 'E' THEN + -- RAISE NOTICE '%', arec.type_ddl; + IF ddl_only THEN + RAISE INFO '%', arec.type_ddl; + ELSE + EXECUTE arec.type_ddl; + END IF; + + ELSEIF arec.typcategory = 'C' THEN + -- RAISE NOTICE '%', arec.type_ddl; + IF ddl_only THEN + RAISE INFO '%', arec.type_ddl; + ELSE + EXECUTE arec.type_ddl; + END IF; + ELSE + RAISE NOTICE 'Unhandled type:%-%', arec.typcategory, arec.typname; + END IF; + END; + END LOOP; + RAISE NOTICE ' TYPES cloned: %', LPAD(cnt::text, 5, ' '); + + -- Create sequences + action := 'Sequences'; + cnt := 0; + -- TODO: Find a way to make this sequence's owner is the correct table. + FOR object IN + SELECT sequence_name::text + FROM information_schema.sequences + WHERE sequence_schema = quote_ident(source_schema) + LOOP + cnt := cnt + 1; + IF ddl_only THEN + RAISE INFO '%', 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ';'; + ELSE + EXECUTE 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object); + END IF; + srctbl := quote_ident(source_schema) || '.' || quote_ident(object); + + EXECUTE 'SELECT last_value, is_called + FROM ' || quote_ident(source_schema) || '.' 
|| quote_ident(object) || ';' + INTO sq_last_value, sq_is_called; + + EXECUTE 'SELECT max_value, start_value, increment_by, min_value, cache_size, cycle, data_type + FROM pg_catalog.pg_sequences WHERE schemaname='|| quote_literal(source_schema) || ' AND sequencename=' || quote_literal(object) || ';' + INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type ; + + IF sq_is_cycled + THEN + sq_cycled := 'CYCLE'; + ELSE + sq_cycled := 'NO CYCLE'; + END IF; + + qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) + || ' AS ' || sq_data_type + || ' INCREMENT BY ' || sq_increment_by + || ' MINVALUE ' || sq_min_value + || ' MAXVALUE ' || sq_max_value + || ' START WITH ' || sq_start_value + || ' RESTART ' || sq_min_value + || ' CACHE ' || sq_cache_value + || ' ' || sq_cycled || ' ;' ; + + IF ddl_only THEN + RAISE INFO '%', qry; + ELSE + EXECUTE qry; + END IF; + + buffer := quote_ident(dest_schema) || '.' || quote_ident(object); + IF include_recs THEN + EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + ELSE + if ddl_only THEN + RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; + ELSE + EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; + END IF; + + END IF; + END LOOP; + RAISE NOTICE ' SEQUENCES cloned: %', LPAD(cnt::text, 5, ' '); + +-- Create tables including partitioned ones (parent/children) and unlogged ones. Order by is critical since child partition range logic is dependent on it. + action := 'Tables'; + cnt := 0; + FOR object, relpersist, relispart, relknd IN + select c.relname, c.relpersistence, c.relispartition, c.relkind + FROM pg_class c join pg_namespace n on (n.oid = c.relnamespace) + WHERE n.nspname = quote_ident(source_schema) and c.relkind in ('r','p') order by c.relkind desc, c.relname + LOOP + cnt := cnt + 1; + buffer := quote_ident(dest_schema) || '.' || quote_ident(object); + buffer2 := ''; + IF relpersist = 'u' THEN + buffer2 := 'UNLOGGED '; + END IF; + + IF relknd = 'r' THEN + IF ddl_only THEN + RAISE INFO '%', 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; + ELSE + EXECUTE 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; + END IF; + ELSIF relknd = 'p' THEN + -- define parent table and assume child tables have already been created based on top level sort order. + SELECT 'CREATE TABLE ' || quote_ident(dest_schema) || '.' 
|| pc.relname || E'(\n' || string_agg(pa.attname || ' ' || pg_catalog.format_type(pa.atttypid, pa.atttypmod) || + coalesce(' DEFAULT ' || (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) FROM pg_catalog.pg_attrdef d + WHERE d.adrelid = pa.attrelid AND d.adnum = pa.attnum AND pa.atthasdef), '') || ' ' || CASE pa.attnotnull WHEN TRUE THEN 'NOT NULL' ELSE 'NULL' END, E',\n') || + coalesce((SELECT E',\n' || string_agg('CONSTRAINT ' || pc1.conname || ' ' || pg_get_constraintdef(pc1.oid), E',\n' ORDER BY pc1.conindid) + FROM pg_constraint pc1 WHERE pc1.conrelid = pa.attrelid), '') into buffer FROM pg_catalog.pg_attribute pa JOIN pg_catalog.pg_class pc ON pc.oid = pa.attrelid AND + pc.relname = quote_ident(object) JOIN pg_catalog.pg_namespace pn ON pn.oid = pc.relnamespace AND pn.nspname = quote_ident(source_schema) + WHERE pa.attnum > 0 AND NOT pa.attisdropped GROUP BY pn.nspname, pc.relname, pa.attrelid; + + -- append partition keyword to it + SELECT pg_catalog.pg_get_partkeydef(c.oid::pg_catalog.oid) into buffer2 FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE c.relname = quote_ident(object) COLLATE pg_catalog.default AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default; + + -- RAISE NOTICE ' buffer = % buffer2 = %',buffer, buffer2; + qry := buffer || ') PARTITION BY ' || buffer2 || ';'; + IF ddl_only THEN + RAISE INFO '%', qry; + ELSE + EXECUTE qry; + END IF; + + -- loop for child tables and alter them to attach to parent for specific partition method. + FOR aname, part_range, object IN + SELECT quote_ident(dest_schema) || '.' || c1.relname as tablename, pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) as partrange, quote_ident(dest_schema) || '.' || c2.relname as object + FROM pg_catalog.pg_class c1, pg_namespace n, pg_catalog.pg_inherits i, pg_class c2 WHERE n.nspname = 'sample' AND c1.relnamespace = n.oid AND c1.relkind = 'r' AND + c1.relispartition AND c1.oid=i.inhrelid AND i.inhparent = c2.oid AND c2.relnamespace = n.oid ORDER BY pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) = 'DEFAULT', c1.oid::pg_catalog.regclass::pg_catalog.text + LOOP + qry := 'ALTER TABLE ONLY ' || object || ' ATTACH PARTITION ' || aname || ' ' || part_range || ';'; + IF ddl_only THEN + RAISE INFO '%', qry; + ELSE + EXECUTE qry; + END IF; + + END LOOP; + END IF; + + -- INCLUDING ALL creates new index names, we restore them to the old name. + -- There should be no conflicts since they live in different schemas + FOR ix_old_name, ix_new_name IN + SELECT old.indexname, new.indexname + FROM pg_indexes old, pg_indexes new + WHERE old.schemaname = source_schema + AND new.schemaname = dest_schema + AND old.tablename = new.tablename + AND old.tablename = object + AND old.indexname <> new.indexname + AND regexp_replace(old.indexdef, E'.*USING','') = regexp_replace(new.indexdef, E'.*USING','') + ORDER BY old.indexname, new.indexname + LOOP + IF ddl_only THEN + RAISE INFO '%', 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; + ELSE + EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; + END IF; + END LOOP; + + IF include_recs + THEN + -- Insert records from source table + RAISE NOTICE 'Populating cloned table, %', buffer; + + -- 2020/06/18 - Issue #31 fix: add "OVERRIDING SYSTEM VALUE" for IDENTITY columns marked as GENERATED ALWAYS. 
+ select count(*) into cnt from pg_class c, pg_attribute a, pg_namespace n + where a.attrelid = c.oid and c.relname = quote_ident(object) and n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and a.attidentity = 'a'; + buffer3 := ''; + IF cnt > 0 THEN + buffer3 := ' OVERRIDING SYSTEM VALUE'; + END IF; + + EXECUTE 'INSERT INTO ' || buffer || buffer3 || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(object) || ';'; + END IF; + + SET search_path = ''; + FOR column_, default_ IN + SELECT column_name::text, + REPLACE(column_default::text, source_schema, dest_schema) + FROM information_schema.COLUMNS + WHERE table_schema = source_schema + AND TABLE_NAME = object + AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' + LOOP + IF ddl_only THEN + -- May need to come back and revisit this since previous sql will not return anything since no schema as created! + RAISE INFO '%', 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_ || ';'; + ELSE + EXECUTE 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_; + END IF; + END LOOP; + EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + + END LOOP; + RAISE NOTICE ' TABLES cloned: %', LPAD(cnt::text, 5, ' '); + + -- add FK constraint + action := 'FK Constraints'; + cnt := 0; + SET search_path = ''; + FOR qry IN + SELECT 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || quote_ident(rn.relname) + || ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || REPLACE(pg_get_constraintdef(ct.oid), 'REFERENCES ' ||quote_ident(source_schema), 'REFERENCES ' || quote_ident(dest_schema)) || ';' + FROM pg_constraint ct + JOIN pg_class rn ON rn.oid = ct.conrelid + WHERE connamespace = src_oid + AND rn.relkind = 'r' + AND ct.contype = 'f' + LOOP + cnt := cnt + 1; + IF ddl_only THEN + RAISE INFO '%', qry; + ELSE + EXECUTE qry; + END IF; + END LOOP; + EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + RAISE NOTICE ' FKEYS cloned: %', LPAD(cnt::text, 5, ' '); + +-- Create views + action := 'Views'; + cnt := 0; + FOR object IN + SELECT table_name::text, + view_definition + FROM information_schema.views + WHERE table_schema = quote_ident(source_schema) + + LOOP + cnt := cnt + 1; + buffer := quote_ident(dest_schema) || '.' || quote_ident(object); + SELECT view_definition INTO v_def + FROM information_schema.views + WHERE table_schema = quote_ident(source_schema) + AND table_name = quote_ident(object); + + IF ddl_only THEN + RAISE INFO '%', 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; + ELSE + EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; + END IF; + END LOOP; + RAISE NOTICE ' VIEWS cloned: %', LPAD(cnt::text, 5, ' '); + + -- Create Materialized views + action := 'Mat. Views'; + cnt := 0; + -- RAISE INFO 'mat views start1'; + FOR object, v_def IN + SELECT matviewname::text, replace(definition,';','') FROM pg_catalog.pg_matviews WHERE schemaname = quote_ident(source_schema) + LOOP + cnt := cnt + 1; + buffer := dest_schema || '.' 
|| quote_ident(object); + IF include_recs THEN + EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH DATA;' ; + ELSE + IF ddl_only THEN + RAISE INFO '%', 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; + ELSE + EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; + END IF; + END IF; + SELECT coalesce(obj_description(oid), '') into adef from pg_class where relkind = 'm' and relname = object; + IF adef <> '' THEN + IF ddl_only THEN + RAISE INFO '%', 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; + ELSE + EXECUTE 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; + END IF; + END IF; + + FOR aname, adef IN + SELECT indexname, replace(indexdef, quote_ident(source_schema), quote_ident(dest_schema)) as newdef FROM pg_indexes where schemaname = quote_ident(source_schema) and tablename = object order by indexname + LOOP + IF ddl_only THEN + RAISE INFO '%', adef || ';'; + ELSE + EXECUTE adef || ';'; + END IF; + END LOOP; + + END LOOP; + RAISE NOTICE ' MAT VIEWS cloned: %', LPAD(cnt::text, 5, ' '); + + +-- Create functions + action := 'Functions'; + cnt := 0; + SET search_path = ''; + FOR func_oid IN + SELECT oid + FROM pg_proc + WHERE pronamespace = src_oid + LOOP + cnt := cnt + 1; + SELECT pg_get_functiondef(func_oid) INTO qry; + SELECT replace(qry, source_schema, dest_schema) INTO dest_qry; + IF ddl_only THEN + RAISE INFO '%', dest_qry; + ELSE + EXECUTE dest_qry; + END IF; + + END LOOP; + EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV: Create Triggers + action := 'Triggers'; + cnt := 0; + FOR arec IN + SELECT trigger_schema, trigger_name, event_object_table, action_order, action_condition, action_statement, action_orientation, action_timing, array_to_string(array_agg(event_manipulation::text), ' OR '), + 'CREATE TRIGGER ' || trigger_name || ' ' || action_timing || ' ' || array_to_string(array_agg(event_manipulation::text), ' OR ') || ' ON ' || quote_ident(dest_schema) || '.' 
|| event_object_table || + ' FOR EACH ' || action_orientation || ' ' || action_statement || ';' as TRIG_DDL + FROM information_schema.triggers where trigger_schema = quote_ident(source_schema) GROUP BY 1,2,3,4,5,6,7,8 + LOOP + BEGIN + cnt := cnt + 1; + IF ddl_only THEN + RAISE INFO '%', arec.trig_ddl; + ELSE + EXECUTE arec.trig_ddl; + END IF; + + END; + END LOOP; + RAISE NOTICE ' TRIGGERS cloned: %', LPAD(cnt::text, 5, ' '); + + -- --------------------- + -- MV: Permissions: Defaults + -- --------------------- + action := 'PRIVS: Defaults'; + cnt := 0; + FOR arec IN + SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "owner", n.nspname AS schema, + CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' WHEN 'n' THEN 'schema' END AS atype, + d.defaclacl as defaclacl, pg_catalog.array_to_string(d.defaclacl, ',') as defaclstr + FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = d.defaclnamespace) WHERE n.nspname IS NOT NULL and n.nspname = quote_ident(source_schema) ORDER BY 3, 2, 1 + LOOP + BEGIN + -- RAISE NOTICE 'owner=% type=% defaclacl=% defaclstr=%', arec.owner, arec.atype, arec.defaclacl, arec.defaclstr; + + FOREACH aclstr IN ARRAY arec.defaclacl + LOOP + cnt := cnt + 1; + -- RAISE NOTICE 'aclstr=%', aclstr; + -- break up into grantor, grantee, and privs, mydb_update=rwU/mydb_owner + SELECT split_part(aclstr, '=',1) INTO grantee; + SELECT split_part(aclstr, '=',2) INTO grantor; + SELECT split_part(grantor, '/',1) INTO privs; + SELECT split_part(grantor, '/',2) INTO grantor; + -- RAISE NOTICE 'grantor=% grantee=% privs=%', grantor, grantee, privs; + + IF arec.atype = 'function' THEN + -- Just having execute is enough to grant all apparently. + buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON FUNCTIONS TO "' || grantee || '";'; + IF ddl_only THEN + RAISE INFO '%', buffer; + ELSE + EXECUTE buffer; + END IF; + + ELSIF arec.atype = 'sequence' THEN + IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN + -- arU is enough for all privs + buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON SEQUENCES TO "' || grantee || '";'; + IF ddl_only THEN + RAISE INFO '%', buffer; + ELSE + EXECUTE buffer; + END IF; + + ELSE + -- have to specify each priv individually + buffer2 := ''; + IF POSITION('r' IN privs) > 0 THEN + buffer2 := 'SELECT'; + END IF; + IF POSITION('w' IN privs) > 0 THEN + IF buffer2 = '' THEN + buffer2 := 'UPDATE'; + ELSE + buffer2 := buffer2 || ', UPDATE'; + END IF; + END IF; + IF POSITION('U' IN privs) > 0 THEN + IF buffer2 = '' THEN + buffer2 := 'USAGE'; + ELSE + buffer2 := buffer2 || ', USAGE'; + END IF; + END IF; + buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON SEQUENCES TO "' || grantee || '";'; + IF ddl_only THEN + RAISE INFO '%', buffer; + ELSE + EXECUTE buffer; + END IF; + END IF; + + ELSIF arec.atype = 'table' THEN + -- do each priv individually, jeeeesh! 
+ buffer2 := ''; + IF POSITION('a' IN privs) > 0 THEN + buffer2 := 'INSERT'; + END IF; + IF POSITION('r' IN privs) > 0 THEN + IF buffer2 = '' THEN + buffer2 := 'SELECT'; + ELSE + buffer2 := buffer2 || ', SELECT'; + END IF; + END IF; + IF POSITION('w' IN privs) > 0 THEN + IF buffer2 = '' THEN + buffer2 := 'UPDATE'; + ELSE + buffer2 := buffer2 || ', UPDATE'; + END IF; + END IF; + IF POSITION('d' IN privs) > 0 THEN + IF buffer2 = '' THEN + buffer2 := 'DELETE'; + ELSE + buffer2 := buffer2 || ', DELETE'; + END IF; + END IF; + IF POSITION('t' IN privs) > 0 THEN + IF buffer2 = '' THEN + buffer2 := 'TRIGGER'; + ELSE + buffer2 := buffer2 || ', TRIGGER'; + END IF; + END IF; + IF POSITION('T' IN privs) > 0 THEN + IF buffer2 = '' THEN + buffer2 := 'TRUNCATE'; + ELSE + buffer2 := buffer2 || ', TRUNCATE'; + END IF; + END IF; + buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON TABLES TO "' || grantee || '";'; + IF ddl_only THEN + RAISE INFO '%', buffer; + ELSE + EXECUTE buffer; + END IF; + + ELSIF arec.atype = 'type' THEN + IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN + -- arU is enough for all privs + buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON TYPES TO "' || grantee || '";'; + IF ddl_only THEN + RAISE INFO '%', buffer; + ELSE + EXECUTE buffer; + END IF; + ELSIF POSITION('U' IN privs) THEN + buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT USAGE ON TYPES TO "' || grantee || '";'; + IF ddl_only THEN + RAISE INFO '%', buffer; + ELSE + EXECUTE buffer; + END IF; + ELSE + RAISE WARNING 'Unhandled TYPE Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; + END IF; + + ELSE + RAISE WARNING 'Unhandled Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; + END IF; + END LOOP; + END; + END LOOP; + + RAISE NOTICE ' DFLT PRIVS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV: PRIVS: schema + -- crunchy data extension, check_access + -- SELECT role_path, base_role, as_role, objtype, schemaname, objname, array_to_string(array_agg(privname),',') as privs FROM all_access() + -- WHERE base_role != CURRENT_USER and objtype = 'schema' and schemaname = 'public' group by 1,2,3,4,5,6; + + action := 'PRIVS: Schema'; + cnt := 0; + FOR arec IN + SELECT 'GRANT ' || p.perm::perm_type || ' ON SCHEMA ' || quote_ident(dest_schema) || ' TO "' || r.rolname || '";' as schema_ddl + FROM pg_catalog.pg_namespace AS n CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('USAGE'), ('CREATE')) AS p(perm) + WHERE n.nspname = quote_ident(source_schema) AND NOT r.rolsuper AND has_schema_privilege(r.oid, n.oid, p.perm) order by r.rolname, p.perm::perm_type + LOOP + BEGIN + cnt := cnt + 1; + IF ddl_only THEN + RAISE INFO '%', arec.schema_ddl; + ELSE + EXECUTE arec.schema_ddl; + END IF; + + END; + END LOOP; + RAISE NOTICE 'SCHEMA PRIVS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV: PRIVS: sequences + action := 'PRIVS: Sequences'; + cnt := 0; + FOR arec IN + SELECT 'GRANT ' || p.perm::perm_type || ' ON ' || quote_ident(dest_schema) || '.' 
|| t.relname::text || ' TO "' || r.rolname || '";' as seq_ddl + FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('SELECT'), ('USAGE'), ('UPDATE')) AS p(perm) + WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind = 'S' AND NOT r.rolsuper AND has_sequence_privilege(r.oid, t.oid, p.perm) + LOOP + BEGIN + cnt := cnt + 1; + IF ddl_only THEN + RAISE INFO '%', arec.seq_ddl; + ELSE + EXECUTE arec.seq_ddl; + END IF; + + END; + END LOOP; + RAISE NOTICE ' SEQ. PRIVS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV: PRIVS: functions + action := 'PRIVS: Functions'; + cnt := 0; + EXECUTE 'SET search_path = ' || quote_ident(dest_schema) ; + FOR arec IN + SELECT 'GRANT EXECUTE ON FUNCTION ' || quote_ident(dest_schema) || '.' || replace(regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', ''), source_schema, dest_schema) || ' TO "' || r.rolname || '";' as func_ddl + FROM pg_catalog.pg_proc f CROSS JOIN pg_catalog.pg_roles AS r WHERE f.pronamespace::regnamespace::name = quote_ident(source_schema) AND NOT r.rolsuper AND has_function_privilege(r.oid, f.oid, 'EXECUTE') + order by regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') + LOOP + BEGIN + cnt := cnt + 1; + IF ddl_only THEN + RAISE INFO '%', arec.func_ddl; + ELSE + EXECUTE arec.func_ddl; + END IF; + + END; + END LOOP; + EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + RAISE NOTICE ' FUNC PRIVS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV: PRIVS: tables + action := 'PRIVS: Tables'; + -- regular, partitioned, and foreign tables plus view and materialized view permissions. TODO: implement foreign table defs. + cnt := 0; + FOR arec IN + SELECT 'GRANT ' || p.perm::perm_type || CASE WHEN t.relkind in ('r', 'p', 'f') THEN ' ON TABLE ' WHEN t.relkind in ('v', 'm') THEN ' ON ' END || quote_ident(dest_schema) || '.' || t.relname::text || ' TO "' || r.rolname || '";' as tbl_ddl, + has_table_privilege(r.oid, t.oid, p.perm) AS granted, t.relkind + FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES (TEXT 'SELECT'), ('INSERT'), ('UPDATE'), ('DELETE'), ('TRUNCATE'), ('REFERENCES'), ('TRIGGER')) AS p(perm) + WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind in ('r', 'p', 'f', 'v', 'm') AND NOT r.rolsuper AND has_table_privilege(r.oid, t.oid, p.perm) order by t.relname::text, t.relkind + LOOP + BEGIN + cnt := cnt + 1; + -- RAISE NOTICE 'ddl=%', arec.tbl_ddl; + IF arec.relkind = 'f' THEN + RAISE WARNING 'Foreign tables are not currently implemented, so skipping privs for them. ddl=%', arec.tbl_ddl; + ELSE + IF ddl_only THEN + RAISE INFO '%', arec.tbl_ddl; + ELSE + EXECUTE arec.tbl_ddl; + END IF; + + END IF; + END; + END LOOP; + RAISE NOTICE ' TABLE PRIVS cloned: %', LPAD(cnt::text, 5, ' '); + + -- Set the search_path back to what it was before + EXECUTE 'SET search_path = ' || src_path_old; + + EXCEPTION + WHEN others THEN + BEGIN + GET STACKED DIAGNOSTICS v_diag1 = MESSAGE_TEXT, v_diag2 = PG_EXCEPTION_DETAIL, v_diag3 = PG_EXCEPTION_HINT, v_diag4 = RETURNED_SQLSTATE, v_diag5 = PG_CONTEXT, v_diag6 = PG_EXCEPTION_CONTEXT; + -- v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1 || ' .' || v_diag2 || ' .' || v_diag3; + v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. 
' || v_diag1; + RAISE EXCEPTION 'Action: % Diagnostics: %',action, v_ret; + -- Set the search_path back to what it was before + EXECUTE 'SET search_path = ' || src_path_old; + RETURN; + END; + +RETURN; +END; + +$BODY$ + LANGUAGE plpgsql VOLATILE COST 100; +-- ALTER FUNCTION public.clone_schema(text, text, boolean, boolean) OWNER TO {db_user}; +""" + + +def _create_clone_schema_function(): + """ + Creates a postgres function `clone_schema` that copies a schema and its + contents. Will replace any existing `clone_schema` functions owned by the + `postgres` superuser. + """ + cursor = connection.cursor() + + # TODO db_user isn't even actually used in latest sql version of the clone schema. We can probably remove format. + db_user = settings.DATABASES["default"].get("USER", None) or "postgres" + cursor.execute(CLONE_SCHEMA_FUNCTION.format(db_user=db_user)) + cursor.close() + + +def clone_schema(schema_name, clone_schema_name, set_connection=True): + """ + Creates a full clone of an existing schema. + """ + # check the clone_schema_name like we usually would + _check_schema_name(clone_schema_name) + if schema_exists(clone_schema_name): + raise ValidationError("Schema name already exists") + + # The schema is changed to public because the clone function should live there. + if set_connection: + connection.set_schema_to_public() + cursor = connection.cursor() + + # check if the clone_schema function already exists in the db + try: + cursor.execute("SELECT 'clone_schema'::regproc") + except ProgrammingError: + _create_clone_schema_function() + transaction.commit() + + cursor.execute(f'SELECT clone_schema({schema_name}, {clone_schema_name}, true, false)') + cursor.close() From 3b3916c141b2d0e5cc72f8e916673a5412cfcd72 Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Wed, 4 Nov 2020 15:06:58 +0100 Subject: [PATCH 06/13] bug fix clone --- tenant_schemas/clone.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tenant_schemas/clone.py b/tenant_schemas/clone.py index 2b6a75ec..cb45d9a5 100644 --- a/tenant_schemas/clone.py +++ b/tenant_schemas/clone.py @@ -833,5 +833,12 @@ def clone_schema(schema_name, clone_schema_name, set_connection=True): _create_clone_schema_function() transaction.commit() - cursor.execute(f'SELECT clone_schema({schema_name}, {clone_schema_name}, true, false)') + sql = 'SELECT clone_schema(%(schema_name)s, %(clone_schema_name)s, true, false)' + cursor.execute( + sql, + { + 'schema_name': schema_name, + 'clone_schema_name': clone_schema_name + } + ) cursor.close() From 66807d153b38716c2169c614a3dcafeb9aba3d98 Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Wed, 4 Nov 2020 15:14:49 +0100 Subject: [PATCH 07/13] rename schema, tenant --- tenant_schemas/rename.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 tenant_schemas/rename.py diff --git a/tenant_schemas/rename.py b/tenant_schemas/rename.py new file mode 100644 index 00000000..0602168f --- /dev/null +++ b/tenant_schemas/rename.py @@ -0,0 +1,26 @@ +from django.core.exceptions import ValidationError +from django.db import connection +from tenant_schemas.postgresql_backend.base import _is_valid_schema_name +from tenant_schemas.utils import schema_exists + + +def rename_schema(*, schema_name, new_schema_name): + """ + This renames a schema to a new name. 
It checks to see if it exists first + """ + cursor = connection.cursor() + + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") + if not _is_valid_schema_name(new_schema_name): + raise ValidationError("Invalid string used for the schema name.") + + sql = 'ALTER SCHEMA {0} RENAME TO {1}'.format(schema_name, new_schema_name) + cursor.execute(sql) + cursor.close() + + +def rename_tenant(*, tenant, new_schema_name): + rename_schema(schema_name=tenant.schema_name, new_schema_name=new_schema_name) + tenant.schema_name = new_schema_name + tenant.save() From eba5fb24e81a516f516521728d7f14dae176da7b Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Wed, 4 Nov 2020 15:18:41 +0100 Subject: [PATCH 08/13] removed tenant specific rename --- tenant_schemas/rename.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tenant_schemas/rename.py b/tenant_schemas/rename.py index 0602168f..7e83bc7f 100644 --- a/tenant_schemas/rename.py +++ b/tenant_schemas/rename.py @@ -18,9 +18,3 @@ def rename_schema(*, schema_name, new_schema_name): sql = 'ALTER SCHEMA {0} RENAME TO {1}'.format(schema_name, new_schema_name) cursor.execute(sql) cursor.close() - - -def rename_tenant(*, tenant, new_schema_name): - rename_schema(schema_name=tenant.schema_name, new_schema_name=new_schema_name) - tenant.schema_name = new_schema_name - tenant.save() From e69f748b6c55d4ddb78560fb26e549678b6fdfce Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Thu, 1 Sep 2022 10:12:13 +0200 Subject: [PATCH 09/13] support for django4 --- tenant_schemas/postgresql_backend/introspection.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tenant_schemas/postgresql_backend/introspection.py b/tenant_schemas/postgresql_backend/introspection.py index 3316b8e3..b73ae2d1 100644 --- a/tenant_schemas/postgresql_backend/introspection.py +++ b/tenant_schemas/postgresql_backend/introspection.py @@ -11,7 +11,13 @@ from django.db.models.indexes import Index except ImportError: Index = None -from django.utils.encoding import force_text + +try: + from django.utils.encoding import force_str +except ImportError: + # Django < 4.0 + from django.utils.encoding import force_text as force_str + fields = FieldInfo._fields if 'default' not in fields: @@ -229,9 +235,9 @@ def get_table_description(self, cursor, table_name): return [ FieldInfo(*( - (force_text(line[0]),) + + (force_str(line[0]),) + line[1:6] + - (field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1]) + (field_map[force_str(line[0])][0] == 'YES', field_map[force_str(line[0])][1]) )) for line in cursor.description ] From 732b8a88a209d91fabfe53f492c54007f04e73ee Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Thu, 1 Sep 2022 10:16:48 +0200 Subject: [PATCH 10/13] support for django4.1 --- tenant_schemas/signals.py | 6 +++++- tenant_schemas/tests/test_routes.py | 12 ++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/tenant_schemas/signals.py b/tenant_schemas/signals.py index f0a75dec..9c4819ad 100644 --- a/tenant_schemas/signals.py +++ b/tenant_schemas/signals.py @@ -1,6 +1,10 @@ from django.dispatch import Signal -post_schema_sync = Signal(providing_args=['tenant']) +try: + # Django < 4.0 + post_schema_sync = Signal(providing_args=['tenant']) +except TypeError: + post_schema_sync = Signal() post_schema_sync.__doc__ = """ Sent after a tenant has been saved, its schema created and synced """ diff --git a/tenant_schemas/tests/test_routes.py b/tenant_schemas/tests/test_routes.py index 
cdca6063..6915c6b2 100644 --- a/tenant_schemas/tests/test_routes.py +++ b/tenant_schemas/tests/test_routes.py @@ -19,6 +19,10 @@ class MissingDefaultTenantMiddleware(DefaultTenantMiddleware): DEFAULT_SCHEMA_NAME = "missing" +def dummy_get_response(request): # pragma: no cover + return None + + @unittest.skipIf(six.PY2, "Unexpectedly failing only on Python 2.7") class RoutesTestCase(BaseTestCase): @classmethod @@ -40,8 +44,8 @@ def setUpClass(cls): def setUp(self): super(RoutesTestCase, self).setUp() self.factory = RequestFactory() - self.tm = TenantMiddleware() - self.dtm = DefaultTenantMiddleware() + self.tm = TenantMiddleware(dummy_get_response) + self.dtm = DefaultTenantMiddleware(dummy_get_response) self.tenant_domain = "tenant.test.com" self.tenant = Tenant(domain_url=self.tenant_domain, schema_name="test") @@ -84,7 +88,7 @@ def test_non_existent_tenant_to_default_schema_routing(self): def test_non_existent_tenant_custom_middleware(self): """Route unrecognised hostnames to the 'test' tenant.""" - dtm = TestDefaultTenantMiddleware() + dtm = TestDefaultTenantMiddleware(dummy_get_response) request = self.factory.get( self.url, HTTP_HOST=self.non_existent_tenant.domain_url ) @@ -94,7 +98,7 @@ def test_non_existent_tenant_custom_middleware(self): def test_non_existent_tenant_and_default_custom_middleware(self): """Route unrecognised hostnames to the 'missing' tenant.""" - dtm = MissingDefaultTenantMiddleware() + dtm = MissingDefaultTenantMiddleware(dummy_get_response) request = self.factory.get( self.url, HTTP_HOST=self.non_existent_tenant.domain_url ) From 51bad35607e956c7d93c64ae650a9537f3d724c6 Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Fri, 20 Oct 2023 16:55:24 +0200 Subject: [PATCH 11/13] move towards psycopg --- setup.py | 2 +- tenant_schemas/postgresql_backend/base.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index cc69b6cc..bcf45d83 100755 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ "Topic :: Database", "Topic :: Software Development :: Libraries", ], - install_requires=["Django>=1.11", "ordered-set", "psycopg2-binary", "six"], + install_requires=["Django>=1.11", "ordered-set", "psycopg-binary", "six"], setup_requires=["setuptools-scm"], use_scm_version=True, zip_safe=False, diff --git a/tenant_schemas/postgresql_backend/base.py b/tenant_schemas/postgresql_backend/base.py index 212d3eae..f3364596 100644 --- a/tenant_schemas/postgresql_backend/base.py +++ b/tenant_schemas/postgresql_backend/base.py @@ -1,6 +1,6 @@ import re import warnings -import psycopg2 +import psycopg from django.conf import settings from django.contrib.contenttypes.models import ContentType @@ -131,7 +131,6 @@ def _cursor(self, name=None): "to call set_schema() or set_tenant()?") _check_schema_name(self.schema_name) public_schema_name = get_public_schema_name() - search_paths = [] if self.schema_name == public_schema_name: search_paths = [public_schema_name] @@ -155,7 +154,7 @@ def _cursor(self, name=None): # we do not have to worry that it's not the good one try: cursor_for_search_path.execute('SET search_path = {0}'.format(','.join(search_paths))) - except (django.db.utils.DatabaseError, psycopg2.InternalError): + except (django.db.utils.DatabaseError, psycopg.InternalError): self.search_path_set = False else: self.search_path_set = True From c322e65e2b5a1d473a83a0f1496087e75d8dba6b Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Fri, 20 Oct 2023 17:06:50 +0200 Subject: [PATCH 12/13] try to fix recursionerror --- 
tenant_schemas/postgresql_backend/base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tenant_schemas/postgresql_backend/base.py b/tenant_schemas/postgresql_backend/base.py index f3364596..6c61f793 100644 --- a/tenant_schemas/postgresql_backend/base.py +++ b/tenant_schemas/postgresql_backend/base.py @@ -153,11 +153,10 @@ def _cursor(self, name=None): # if the next instruction is not a rollback it will just fail also, so # we do not have to worry that it's not the good one try: + self.search_path_set = True cursor_for_search_path.execute('SET search_path = {0}'.format(','.join(search_paths))) except (django.db.utils.DatabaseError, psycopg.InternalError): self.search_path_set = False - else: - self.search_path_set = True if name: cursor_for_search_path.close() From 61dc693006fca8c1014e7be6480cc23eb4f3f054 Mon Sep 17 00:00:00 2001 From: Jeroen Brouwer Date: Mon, 23 Oct 2023 07:06:54 +0200 Subject: [PATCH 13/13] fix for psycopg3 --- tenant_schemas/postgresql_backend/base.py | 25 +++++++++++++++-------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/tenant_schemas/postgresql_backend/base.py b/tenant_schemas/postgresql_backend/base.py index 6c61f793..636e5716 100644 --- a/tenant_schemas/postgresql_backend/base.py +++ b/tenant_schemas/postgresql_backend/base.py @@ -1,6 +1,5 @@ import re import warnings -import psycopg from django.conf import settings from django.contrib.contenttypes.models import ContentType @@ -10,6 +9,15 @@ from tenant_schemas.utils import get_public_schema_name, get_limit_set_calls from tenant_schemas.postgresql_backend.introspection import DatabaseSchemaIntrospection +try: + from django.db.backends.postgresql.psycopg_any import is_psycopg3 +except ImportError: + is_psycopg3 = False + +if is_psycopg3: + import psycopg +else: + import psycopg2 as psycopg ORIGINAL_BACKEND = getattr(settings, 'ORIGINAL_BACKEND', 'django.db.backends.postgresql_psycopg2') # Django 1.9+ takes care to rename the default backend to 'django.db.backends.postgresql' @@ -141,24 +149,22 @@ def _cursor(self, name=None): search_paths.extend(EXTRA_SEARCH_PATHS) - if name: - # Named cursor can only be used once - cursor_for_search_path = self.connection.cursor() - else: - # Reuse - cursor_for_search_path = cursor + # Named cursor can only be used once, just like psycopg3 cursors. + needs_new_cursor = name or is_psycopg3 + cursor_for_search_path = self.connection.cursor() if needs_new_cursor else cursor # In the event that an error already happened in this transaction and we are going # to rollback we should just ignore database error when setting the search_path # if the next instruction is not a rollback it will just fail also, so # we do not have to worry that it's not the good one try: - self.search_path_set = True cursor_for_search_path.execute('SET search_path = {0}'.format(','.join(search_paths))) except (django.db.utils.DatabaseError, psycopg.InternalError): self.search_path_set = False + else: + self.search_path_set = True - if name: + if needs_new_cursor: cursor_for_search_path.close() return cursor @@ -169,5 +175,6 @@ class FakeTenant: We can't import any db model in a backend (apparently?), so this class is used for wrapping schema names in a tenant-like structure. """ + def __init__(self, schema_name): self.schema_name = schema_name
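
Usage note (editorial addition, not part of the patch series above): PATCH 05/06 add tenant_schemas.clone.clone_schema() and PATCH 07/08 add tenant_schemas.rename.rename_schema(). Below is a minimal sketch of how these helpers might be called from a Django shell once this fork is installed and a tenant schema exists; the schema names are illustrative assumptions, and only the two function signatures come from the patches themselves.

    # Hypothetical usage sketch; assumes a configured Django project using this fork,
    # with an existing tenant schema named 'tenant1'. Names here are placeholders.
    from tenant_schemas.clone import clone_schema
    from tenant_schemas.rename import rename_schema

    # Copy an existing schema, records included. Per PATCH 05/06 this creates the
    # clone_schema() SQL function on first use and then runs, on the database side,
    # SELECT clone_schema('tenant1', 'tenant1_copy', true, false).
    clone_schema('tenant1', 'tenant1_copy')

    # Rename a schema in place. Per PATCH 07 this raises ValidationError if the new
    # name already exists or is not a valid schema name.
    rename_schema(schema_name='tenant1_copy', new_schema_name='tenant1_backup')

Note that clone_schema() switches the connection to the public schema by default (set_connection=True), and rename_schema() only renames the schema itself; updating any tenant model row that records the schema name is left to the caller after PATCH 08 removed rename_tenant().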