* Add tests for StreamIdGenerator
* Drive-by: annotate all defs
* Revert "Revert "Remove slaved id tracker (#14376)" (#14463)"
This reverts commit d63814fd736fed5d3d45ff3af5e6d3bfae50c439, which in turn
reverted 36097e88c4. This restores the latter.
* Fix StreamIdGenerator not handling unpersisted IDs
Spotted by @erikjohnston.
Closes #14456.
* Changelog
Co-authored-by: Nick Mills-Barrett <nick@fizzadar.com>
Co-authored-by: Erik Johnston <erik@matrix.org>
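For review context: the change below folds the old `SlavedIdTracker` into
`StreamIdGenerator` behind a new `is_writer` flag. A minimal, self-contained
sketch of the resulting contract (toy code with step = +1 only; the real class
further down adds locking, negative steps, and loading the initial ID from the
DB):

    from collections import OrderedDict

    class SketchStreamIdGenerator:
        """Toy model of StreamIdGenerator after this PR (illustrative only)."""

        def __init__(self, current: int, is_writer: bool = True) -> None:
            self._current = current  # highest ID known to be persisted
            self._is_writer = is_writer
            self._unfinished_ids: "OrderedDict[int, int]" = OrderedDict()

        def advance(self, instance_name: str, new_id: int) -> None:
            # Only non-writers may be advanced over replication.
            if self._is_writer:
                raise Exception("Replication is not supported by writer StreamIdGenerator")
            self._current = max(self._current, new_id)

        def get_current_token(self) -> int:
            # Non-writers never allocate IDs, so _current is authoritative.
            if not self._is_writer:
                return self._current
            # Writers must not advertise allocated-but-unpersisted IDs.
            if self._unfinished_ids:
                return next(iter(self._unfinished_ids)) - 1
            return self._current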
@@ -0,0 +1 @@
Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
@@ -0,0 +1 @@
Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
@@ -117,6 +117,9 @@ disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True

[mypy-tests.storage.test_id_generators]
disallow_untyped_defs = True

[mypy-tests.storage.test_profile]
disallow_untyped_defs = True
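The mypy stanza above is what drives the "annotate all defs" drive-by:
`disallow_untyped_defs = True` makes mypy reject any function without full
annotations in the listed test modules. A generic illustration of the flag's
effect (not taken from this diff):

    # mypy reports "Function is missing a type annotation" for this def:
    def scale(x, factor=2):
        return x * factor

    # The annotated equivalent passes under disallow_untyped_defs:
    def scale_typed(x: int, factor: int = 2) -> int:
        return x * factor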
@@ -1,13 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@@ -1,13 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@@ -1,50 +0,0 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional, Tuple

from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id


class SlavedIdTracker(AbstractStreamIdTracker):
    """Tracks the "current" stream ID of a stream with a single writer.

    See `AbstractStreamIdTracker` for more details.

    Note that this class does not work correctly when there are multiple
    writers.
    """

    def __init__(
        self,
        db_conn: LoggingDatabaseConnection,
        table: str,
        column: str,
        extra_tables: Optional[List[Tuple[str, str]]] = None,
        step: int = 1,
    ):
        self.step = step
        self._current = _load_current_id(db_conn, table, column, step)
        if extra_tables:
            for table, column in extra_tables:
                self.advance(None, _load_current_id(db_conn, table, column))

    def advance(self, instance_name: Optional[str], new_id: int) -> None:
        self._current = (max if self.step > 0 else min)(self._current, new_id)

    def get_current_token(self) -> int:
        return self._current

    def get_current_token_for_writer(self, instance_name: str) -> int:
        return self.get_current_token()
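Everything the class deleted above did survives as the `is_writer=False` mode
of `StreamIdGenerator`; each call site below applies the same mechanical
rewrite. In terms of the toy sketch after the commit message:

    writer = SketchStreamIdGenerator(123, is_writer=True)
    follower = SketchStreamIdGenerator(123, is_writer=False)

    # Followers track replication, exactly as SlavedIdTracker.advance did.
    follower.advance("master", 130)
    assert follower.get_current_token() == 130

    # Writers own the stream and refuse replication updates.
    try:
        writer.advance("master", 130)
    except Exception as e:
        assert "Replication is not supported" in str(e)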
@@ -27,7 +27,6 @@ from typing import (
)

from synapse.api.constants import AccountDataTypes
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream
from synapse.storage._base import db_to_json
from synapse.storage.database import (
@@ -68,12 +67,11 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
        # to write account data. A value of `True` implies that `_account_data_id_gen`
        # is an `AbstractStreamIdGenerator` and not just a tracker.
        self._account_data_id_gen: AbstractStreamIdTracker

        self._can_write_to_account_data = (
            self._instance_name in hs.config.worker.writers.account_data
        )

        if isinstance(database.engine, PostgresEngine):
            self._can_write_to_account_data = (
                self._instance_name in hs.config.worker.writers.account_data
            )
            self._account_data_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
@@ -95,21 +93,13 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
            # updated over replication. (Multiple writers are not supported for
            # SQLite).
            if self._instance_name in hs.config.worker.writers.account_data:
                self._can_write_to_account_data = True
                self._account_data_id_gen = StreamIdGenerator(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )
            else:
                self._account_data_id_gen = SlavedIdTracker(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )
            self._account_data_id_gen = StreamIdGenerator(
                db_conn,
                "room_account_data",
                "stream_id",
                extra_tables=[("room_tags_revisions", "stream_id")],
                is_writer=self._instance_name in hs.config.worker.writers.account_data,
            )

        account_max = self.get_max_account_data_stream_id()
        self._account_data_stream_cache = StreamChangeCache(
@@ -38,7 +38,6 @@ from synapse.logging.opentracing import (
    whitelisted_homeserver,
)
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
@@ -86,28 +85,19 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
    ):
        super().__init__(database, db_conn, hs)

        if hs.config.worker.worker_app is None:
            self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
                db_conn,
                "device_lists_stream",
                "stream_id",
                extra_tables=[
                    ("user_signature_stream", "stream_id"),
                    ("device_lists_outbound_pokes", "stream_id"),
                    ("device_lists_changes_in_room", "stream_id"),
                ],
            )
        else:
            self._device_list_id_gen = SlavedIdTracker(
                db_conn,
                "device_lists_stream",
                "stream_id",
                extra_tables=[
                    ("user_signature_stream", "stream_id"),
                    ("device_lists_outbound_pokes", "stream_id"),
                    ("device_lists_changes_in_room", "stream_id"),
                ],
            )
        # In the worker store this is an ID tracker which we overwrite in the non-worker
        # class below that is used on the main process.
        self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
            db_conn,
            "device_lists_stream",
            "stream_id",
            extra_tables=[
                ("user_signature_stream", "stream_id"),
                ("device_lists_outbound_pokes", "stream_id"),
                ("device_lists_changes_in_room", "stream_id"),
            ],
            is_writer=hs.config.worker.worker_app is None,
        )

        # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a
        # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker).
@@ -59,7 +59,6 @@ from synapse.metrics.background_process_metrics import (
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import BackfillStream
from synapse.replication.tcp.streams.events import EventsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -213,26 +212,20 @@ class EventsWorkerStore(SQLBaseStore):
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
            # updated over replication. (Multiple writers are not supported for
            # SQLite).
            if hs.get_instance_name() in hs.config.worker.writers.events:
                self._stream_id_gen = StreamIdGenerator(
                    db_conn,
                    "events",
                    "stream_ordering",
                )
                self._backfill_id_gen = StreamIdGenerator(
                    db_conn,
                    "events",
                    "stream_ordering",
                    step=-1,
                    extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
                )
            else:
                self._stream_id_gen = SlavedIdTracker(
                    db_conn, "events", "stream_ordering"
                )
                self._backfill_id_gen = SlavedIdTracker(
                    db_conn, "events", "stream_ordering", step=-1
                )
            self._stream_id_gen = StreamIdGenerator(
                db_conn,
                "events",
                "stream_ordering",
                is_writer=hs.get_instance_name() in hs.config.worker.writers.events,
            )
            self._backfill_id_gen = StreamIdGenerator(
                db_conn,
                "events",
                "stream_ordering",
                step=-1,
                extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
                is_writer=hs.get_instance_name() in hs.config.worker.writers.events,
            )

        events_max = self._stream_id_gen.get_current_token()
        curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
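Note that the backfill generator above keeps `step=-1`: backfill stream
orderings count downwards, so a "newer" ID is more negative. The
`(max if step > 0 else min)` comparison used by `advance` handles both
directions; a quick toy check (not the real class):

    def advanced(current: int, new_id: int, step: int) -> int:
        # Mirrors `(max if step > 0 else min)(self._current, new_id)`.
        return (max if step > 0 else min)(current, new_id)

    assert advanced(10, 12, step=1) == 12       # forward stream: larger wins
    assert advanced(-10, -12, step=-1) == -12   # backfill: more negative wins
    assert advanced(-10, -5, step=-1) == -10    # stale update is ignored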
@@ -30,7 +30,6 @@ from typing import (
)

from synapse.api.errors import StoreError
from synapse.config.homeserver import ExperimentalConfig
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import PushRulesStream
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
@@ -111,14 +110,14 @@ class PushRulesWorkerStore(
    ):
        super().__init__(database, db_conn, hs)

        if hs.config.worker.worker_app is None:
            self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
                db_conn, "push_rules_stream", "stream_id"
            )
        else:
            self._push_rules_stream_id_gen = SlavedIdTracker(
                db_conn, "push_rules_stream", "stream_id"
            )
        # In the worker store this is an ID tracker which we overwrite in the non-worker
        # class below that is used on the main process.
        self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
            db_conn,
            "push_rules_stream",
            "stream_id",
            is_writer=hs.config.worker.worker_app is None,
        )

        push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict(
            db_conn,
@@ -27,7 +27,6 @@ from typing import (
)

from synapse.push import PusherConfig, ThrottleParams
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import PushersStream
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import (
@@ -59,20 +58,15 @@ class PusherWorkerStore(SQLBaseStore):
    ):
        super().__init__(database, db_conn, hs)

        if hs.config.worker.worker_app is None:
            self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
                db_conn,
                "pushers",
                "id",
                extra_tables=[("deleted_pushers", "stream_id")],
            )
        else:
            self._pushers_id_gen = SlavedIdTracker(
                db_conn,
                "pushers",
                "id",
                extra_tables=[("deleted_pushers", "stream_id")],
            )
        # In the worker store this is an ID tracker which we overwrite in the non-worker
        # class below that is used on the main process.
        self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
            db_conn,
            "pushers",
            "id",
            extra_tables=[("deleted_pushers", "stream_id")],
            is_writer=hs.config.worker.worker_app is None,
        )

        self.db_pool.updates.register_background_update_handler(
            "remove_deactivated_pushers",
@@ -27,7 +27,6 @@ from typing import (
)

from synapse.api.constants import EduTypes
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import ReceiptsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
@@ -61,6 +60,9 @@ class ReceiptsWorkerStore(SQLBaseStore):
        hs: "HomeServer",
    ):
        self._instance_name = hs.get_instance_name()

        # In the worker store this is an ID tracker which we overwrite in the non-worker
        # class below that is used on the main process.
        self._receipts_id_gen: AbstractStreamIdTracker

        if isinstance(database.engine, PostgresEngine):
@@ -87,14 +89,12 @@ class ReceiptsWorkerStore(SQLBaseStore):
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
            # updated over replication. (Multiple writers are not supported for
            # SQLite).
            if hs.get_instance_name() in hs.config.worker.writers.receipts:
                self._receipts_id_gen = StreamIdGenerator(
                    db_conn, "receipts_linearized", "stream_id"
                )
            else:
                self._receipts_id_gen = SlavedIdTracker(
                    db_conn, "receipts_linearized", "stream_id"
                )
            self._receipts_id_gen = StreamIdGenerator(
                db_conn,
                "receipts_linearized",
                "stream_id",
                is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts,
            )

        super().__init__(database, db_conn, hs)
@@ -186,11 +186,13 @@ class StreamIdGenerator(AbstractStreamIdGenerator):
        column: str,
        extra_tables: Iterable[Tuple[str, str]] = (),
        step: int = 1,
        is_writer: bool = True,
    ) -> None:
        assert step != 0
        self._lock = threading.Lock()
        self._step: int = step
        self._current: int = _load_current_id(db_conn, table, column, step)
        self._is_writer = is_writer
        for table, column in extra_tables:
            self._current = (max if step > 0 else min)(
                self._current, _load_current_id(db_conn, table, column, step)
@@ -204,9 +206,11 @@ class StreamIdGenerator(AbstractStreamIdGenerator):
        self._unfinished_ids: OrderedDict[int, int] = OrderedDict()

    def advance(self, instance_name: str, new_id: int) -> None:
        # `StreamIdGenerator` should only be used when there is a single writer,
        # so replication should never happen.
        raise Exception("Replication is not supported by StreamIdGenerator")
        # Advance should never be called on a writer instance, only over replication
        if self._is_writer:
            raise Exception("Replication is not supported by writer StreamIdGenerator")

        self._current = (max if self._step > 0 else min)(self._current, new_id)

    def get_next(self) -> AsyncContextManager[int]:
        with self._lock:
@@ -249,6 +253,9 @@ class StreamIdGenerator(AbstractStreamIdGenerator):
        return _AsyncCtxManagerWrapper(manager())

    def get_current_token(self) -> int:
        if not self._is_writer:
            return self._current

        with self._lock:
            if self._unfinished_ids:
                return next(iter(self._unfinished_ids)) - self._step
@@ -16,15 +16,157 @@ from typing import List, Optional

from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
)
from synapse.storage.engines import IncorrectDatabaseSetup
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.util import Clock

from tests.unittest import HomeserverTestCase
from tests.utils import USE_POSTGRES_FOR_TESTS


class StreamIdGeneratorTestCase(HomeserverTestCase):
    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main
        self.db_pool: DatabasePool = self.store.db_pool

        self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))

    def _setup_db(self, txn: LoggingTransaction) -> None:
        txn.execute(
            """
            CREATE TABLE foobar (
                stream_id BIGINT NOT NULL,
                data TEXT
            );
            """
        )
        txn.execute("INSERT INTO foobar VALUES (123, 'hello world');")

    def _create_id_generator(self) -> StreamIdGenerator:
        def _create(conn: LoggingDatabaseConnection) -> StreamIdGenerator:
            return StreamIdGenerator(
                db_conn=conn,
                table="foobar",
                column="stream_id",
            )

        return self.get_success_or_raise(self.db_pool.runWithConnection(_create))

    def test_initial_value(self) -> None:
        """Check that we read the current token from the DB."""
        id_gen = self._create_id_generator()
        self.assertEqual(id_gen.get_current_token(), 123)

    def test_single_gen_next(self) -> None:
        """Check that we correctly increment the current token from the DB."""
        id_gen = self._create_id_generator()

        async def test_gen_next() -> None:
            async with id_gen.get_next() as next_id:
                # We haven't persisted `next_id` yet; current token is still 123
                self.assertEqual(id_gen.get_current_token(), 123)
                # But we did learn what the next value is
                self.assertEqual(next_id, 124)

            # Once the context manager closes we assume that the `next_id` has been
            # written to the DB.
            self.assertEqual(id_gen.get_current_token(), 124)

        self.get_success(test_gen_next())

    def test_multiple_gen_nexts(self) -> None:
        """Check that we handle overlapping calls to gen_next sensibly."""
        id_gen = self._create_id_generator()

        async def test_gen_next() -> None:
            ctx1 = id_gen.get_next()
            ctx2 = id_gen.get_next()
            ctx3 = id_gen.get_next()

            # Request three new stream IDs.
            self.assertEqual(await ctx1.__aenter__(), 124)
            self.assertEqual(await ctx2.__aenter__(), 125)
            self.assertEqual(await ctx3.__aenter__(), 126)

            # None are persisted: current token unchanged.
            self.assertEqual(id_gen.get_current_token(), 123)

            # Persist each in turn.
            await ctx1.__aexit__(None, None, None)
            self.assertEqual(id_gen.get_current_token(), 124)
            await ctx2.__aexit__(None, None, None)
            self.assertEqual(id_gen.get_current_token(), 125)
            await ctx3.__aexit__(None, None, None)
            self.assertEqual(id_gen.get_current_token(), 126)

        self.get_success(test_gen_next())
    def test_multiple_gen_nexts_closed_in_different_order(self) -> None:
        """Check that we handle overlapping calls to gen_next, even when their IDs
        are created and persisted in different orders."""
        id_gen = self._create_id_generator()

        async def test_gen_next() -> None:
            ctx1 = id_gen.get_next()
            ctx2 = id_gen.get_next()
            ctx3 = id_gen.get_next()

            # Request three new stream IDs.
            self.assertEqual(await ctx1.__aenter__(), 124)
            self.assertEqual(await ctx2.__aenter__(), 125)
            self.assertEqual(await ctx3.__aenter__(), 126)

            # None are persisted: current token unchanged.
            self.assertEqual(id_gen.get_current_token(), 123)

            # Persist them in a different order, starting with 126 from ctx3.
            await ctx3.__aexit__(None, None, None)
            # We haven't persisted 124 from ctx1 yet---current token is still 123.
            self.assertEqual(id_gen.get_current_token(), 123)

            # Now persist 124 from ctx1.
            await ctx1.__aexit__(None, None, None)
            # Current token is then 124, waiting for 125 to be persisted.
            self.assertEqual(id_gen.get_current_token(), 124)

            # Finally persist 125 from ctx2.
            await ctx2.__aexit__(None, None, None)
            # Current token is then 126 (skipping over 125).
            self.assertEqual(id_gen.get_current_token(), 126)

        self.get_success(test_gen_next())

    def test_gen_next_while_still_waiting_for_persistence(self) -> None:
        """Check that we handle overlapping calls to gen_next."""
        id_gen = self._create_id_generator()

        async def test_gen_next() -> None:
            ctx1 = id_gen.get_next()
            ctx2 = id_gen.get_next()
            ctx3 = id_gen.get_next()

            # Request two new stream IDs.
            self.assertEqual(await ctx1.__aenter__(), 124)
            self.assertEqual(await ctx2.__aenter__(), 125)

            # Persist ctx2 first.
            await ctx2.__aexit__(None, None, None)
            # Still waiting on ctx1's ID to be persisted.
            self.assertEqual(id_gen.get_current_token(), 123)

            # Now request a third stream ID. It should be 126 (the smallest ID that
            # we've not yet handed out.)
            self.assertEqual(await ctx3.__aenter__(), 126)

        self.get_success(test_gen_next())
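The tests above all exercise one invariant: the advertised token is the newest
ID that is safe to read up to, i.e. one step before the oldest
still-unpersisted ID, falling back to `_current` once nothing is in flight.
Restated as toy code (step = 1):

    def current_token(current: int, unfinished: "set[int]") -> int:
        return min(unfinished) - 1 if unfinished else current

    # Mirrors test_multiple_gen_nexts_closed_in_different_order:
    assert current_token(126, {124, 125, 126}) == 123  # nothing persisted yet
    assert current_token(126, {124, 125}) == 123       # 126 persisted first
    assert current_token(126, {125}) == 124            # then 124
    assert current_token(126, set()) == 126            # finally 125; jump to 126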
class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
    if not USE_POSTGRES_FOR_TESTS:
        skip = "Requires Postgres"

@@ -48,9 +190,9 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
        )

    def _create_id_generator(
        self, instance_name="master", writers: Optional[List[str]] = None
        self, instance_name: str = "master", writers: Optional[List[str]] = None
    ) -> MultiWriterIdGenerator:
        def _create(conn):
        def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
            return MultiWriterIdGenerator(
                conn,
                self.db_pool,
@@ -446,7 +588,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
        self._insert_row_with_id("master", 3)

        # Now we add a row *without* updating the stream ID
        def _insert(txn):
        def _insert(txn: Cursor) -> None:
            txn.execute("INSERT INTO foobar VALUES (26, 'master')")

        self.get_success(self.db_pool.runInteraction("_insert", _insert))
@@ -481,9 +623,9 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
        )

    def _create_id_generator(
        self, instance_name="master", writers: Optional[List[str]] = None
        self, instance_name: str = "master", writers: Optional[List[str]] = None
    ) -> MultiWriterIdGenerator:
        def _create(conn):
        def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
            return MultiWriterIdGenerator(
                conn,
                self.db_pool,
@@ -617,9 +759,9 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
        )

    def _create_id_generator(
        self, instance_name="master", writers: Optional[List[str]] = None
        self, instance_name: str = "master", writers: Optional[List[str]] = None
    ) -> MultiWriterIdGenerator:
        def _create(conn):
        def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
            return MultiWriterIdGenerator(
                conn,
                self.db_pool,
@@ -641,7 +783,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
        instance_name: str,
        number: int,
        update_stream_table: bool = True,
    ):
    ) -> None:
        """Insert N rows as the given instance, inserting with stream IDs pulled
        from the postgres sequence.
        """