Browse Source

Use attempt_to_set_autocommit everywhere. (#16615)

To avoid asserting the type of the database connection.
tags/v1.97.0rc1
Patrick Cloke 6 months ago
committed by GitHub
parent
commit
2c6a7dfcbf
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 24 additions and 22 deletions
  1. +1
    -0
      changelog.d/16615.misc
  2. +12
    -6
      synapse/storage/background_updates.py
  3. +4
    -4
      synapse/storage/databases/main/search.py
  4. +2
    -2
      synapse/storage/databases/state/bg_updates.py
  5. +5
    -10
      tests/server.py

+ 1
- 0
changelog.d/16615.misc View File

@@ -0,0 +1 @@
Use more generic database methods.

+ 12
- 6
synapse/storage/background_updates.py View File

@@ -49,7 +49,11 @@ else:


if TYPE_CHECKING:
from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
)


logger = logging.getLogger(__name__)


@@ -746,10 +750,10 @@ class BackgroundUpdater:
The named index will be dropped upon completion of the new index.
"""


def create_index_psql(conn: Connection) -> None:
def create_index_psql(conn: "LoggingDatabaseConnection") -> None:
conn.rollback()
# postgres insists on autocommit for the index
conn.set_session(autocommit=True) # type: ignore
conn.engine.attempt_to_set_autocommit(conn.conn, True)


try:
c = conn.cursor()
@@ -793,9 +797,9 @@ class BackgroundUpdater:
undo_timeout_sql = f"SET statement_timeout = {default_timeout}"
conn.cursor().execute(undo_timeout_sql)


conn.set_session(autocommit=False) # type: ignore
conn.engine.attempt_to_set_autocommit(conn.conn, False)


def create_index_sqlite(conn: Connection) -> None:
def create_index_sqlite(conn: "LoggingDatabaseConnection") -> None:
# Sqlite doesn't support concurrent creation of indexes.
#
# We assume that sqlite doesn't give us invalid indices; however
@@ -825,7 +829,9 @@ class BackgroundUpdater:
c.execute(sql)


if isinstance(self.db_pool.engine, engines.PostgresEngine):
runner: Optional[Callable[[Connection], None]] = create_index_psql
runner: Optional[
Callable[[LoggingDatabaseConnection], None]
] = create_index_psql
elif psql_only:
runner = None
else:


+ 4
- 4
synapse/storage/databases/main/search.py View File

@@ -275,7 +275,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):


# we have to set autocommit, because postgres refuses to
# CREATE INDEX CONCURRENTLY without it.
conn.set_session(autocommit=True)
conn.engine.attempt_to_set_autocommit(conn.conn, True)


try:
c = conn.cursor()
@@ -301,7 +301,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
# we should now be able to delete the GIST index.
c.execute("DROP INDEX IF EXISTS event_search_fts_idx_gist")
finally:
conn.set_session(autocommit=False)
conn.engine.attempt_to_set_autocommit(conn.conn, False)


if isinstance(self.database_engine, PostgresEngine):
await self.db_pool.runWithConnection(create_index)
@@ -323,7 +323,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):


def create_index(conn: LoggingDatabaseConnection) -> None:
conn.rollback()
conn.set_session(autocommit=True)
conn.engine.attempt_to_set_autocommit(conn.conn, True)
c = conn.cursor()


# We create with NULLS FIRST so that when we search *backwards*
@@ -340,7 +340,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
ON event_search(origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)
"""
)
conn.set_session(autocommit=False)
conn.engine.attempt_to_set_autocommit(conn.conn, False)


await self.db_pool.runWithConnection(create_index)




+ 2
- 2
synapse/storage/databases/state/bg_updates.py View File

@@ -492,7 +492,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
conn.rollback()
if isinstance(self.database_engine, PostgresEngine):
# postgres insists on autocommit for the index
conn.set_session(autocommit=True)
conn.engine.attempt_to_set_autocommit(conn.conn, True)
try:
txn = conn.cursor()
txn.execute(
@@ -501,7 +501,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
)
txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
finally:
conn.set_session(autocommit=False)
conn.engine.attempt_to_set_autocommit(conn.conn, False)
else:
txn = conn.cursor()
txn.execute(


+ 5
- 10
tests/server.py View File

@@ -88,7 +88,7 @@ from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.engines import PostgresEngine, create_engine
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
from synapse.types import ISynapseReactor, JsonDict
from synapse.util import Clock
@@ -1029,9 +1029,7 @@ def setup_test_homeserver(


# Create the database before we actually try and connect to it, based off
# the template database we generate in setupdb()
if isinstance(db_engine, PostgresEngine):
import psycopg2.extensions

if USE_POSTGRES_FOR_TESTS:
db_conn = db_engine.module.connect(
dbname=POSTGRES_BASE_DB,
user=POSTGRES_USER,
@@ -1039,8 +1037,7 @@ def setup_test_homeserver(
port=POSTGRES_PORT,
password=POSTGRES_PASSWORD,
)
assert isinstance(db_conn, psycopg2.extensions.connection)
db_conn.autocommit = True
db_engine.attempt_to_set_autocommit(db_conn, True)
cur = db_conn.cursor()
cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
cur.execute(
@@ -1065,13 +1062,12 @@ def setup_test_homeserver(


hs.setup()


if isinstance(db_engine, PostgresEngine):
if USE_POSTGRES_FOR_TESTS:
database_pool = hs.get_datastores().databases[0]


# We need to do cleanup on PostgreSQL
def cleanup() -> None:
import psycopg2
import psycopg2.extensions


# Close all the db pools
database_pool._db_pool.close()
@@ -1086,8 +1082,7 @@ def setup_test_homeserver(
port=POSTGRES_PORT,
password=POSTGRES_PASSWORD,
)
assert isinstance(db_conn, psycopg2.extensions.connection)
db_conn.autocommit = True
db_engine.attempt_to_set_autocommit(db_conn, True)
cur = db_conn.cursor()


# Try a few times to drop the DB. Some things may hold on to the


Loading…
Cancel
Save