# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import (
    TYPE_CHECKING,
    Any,
    Collection,
    Dict,
    Iterable,
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    cast,
)

from synapse.api.constants import EduTypes
from synapse.replication.tcp.streams import ReceiptsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
)
from synapse.storage.engines import PostgresEngine
from synapse.storage.engines._base import IsolationLevel
from synapse.storage.util.id_generators import (
    AbstractStreamIdTracker,
    MultiWriterIdGenerator,
    StreamIdGenerator,
)
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.stream_change_cache import StreamChangeCache

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

class ReceiptsWorkerStore(SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        self._instance_name = hs.get_instance_name()

        # In the worker store this is an ID tracker which we overwrite in the
        # non-worker class below that is used on the main process.
        self._receipts_id_gen: AbstractStreamIdTracker

        if isinstance(database.engine, PostgresEngine):
            self._can_write_to_receipts = (
                self._instance_name in hs.config.worker.writers.receipts
            )

            self._receipts_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                notifier=hs.get_replication_notifier(),
                stream_name="receipts",
                instance_name=self._instance_name,
                tables=[("receipts_linearized", "instance_name", "stream_id")],
                sequence_name="receipts_sequence",
                writers=hs.config.worker.writers.receipts,
            )
        else:
            self._can_write_to_receipts = True

            # We shouldn't be running in worker mode with SQLite, but it's
            # useful to support it for unit tests.
            #
            # If this process is the writer, we need to use `StreamIdGenerator`,
            # otherwise we use `SlavedIdTracker`, which gets updated over
            # replication. (Multiple writers are not supported with SQLite.)
            self._receipts_id_gen = StreamIdGenerator(
                db_conn,
                hs.get_replication_notifier(),
                "receipts_linearized",
                "stream_id",
                is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts,
            )

        super().__init__(database, db_conn, hs)

        max_receipts_stream_id = self.get_max_receipt_stream_id()
        receipts_stream_prefill, min_receipts_stream_id = self.db_pool.get_cache_dict(
            db_conn,
            "receipts_linearized",
            entity_column="room_id",
            stream_column="stream_id",
            max_value=max_receipts_stream_id,
            limit=10000,
        )
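        # Prefill the stream change cache with recent receipt activity per room,
        # so that `get_entities_changed` lookups can usually be answered from
        # memory instead of hitting the database.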
        self._receipts_stream_cache = StreamChangeCache(
            "ReceiptsRoomChangeCache",
            min_receipts_stream_id,
            prefilled_cache=receipts_stream_prefill,
        )

    def get_max_receipt_stream_id(self) -> int:
        """Get the current max stream ID for receipts stream"""
        return self._receipts_id_gen.get_current_token()

    def get_last_unthreaded_receipt_for_user_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        room_id: str,
        receipt_types: Collection[str],
    ) -> Optional[Tuple[str, int]]:
        """
        Fetch the event ID and stream_ordering for the latest unthreaded receipt
        in a room with one of the given receipt types.

        Args:
            user_id: The user to fetch receipts for.
            room_id: The room ID to fetch the receipt for.
            receipt_types: The receipt types to fetch.

        Returns:
            The event ID and stream ordering of the latest receipt, if one exists.
        """

        clause, args = make_in_list_sql_clause(
            self.database_engine, "receipt_type", receipt_types
        )

        sql = f"""
            SELECT event_id, stream_ordering
            FROM receipts_linearized
            INNER JOIN events USING (room_id, event_id)
            WHERE {clause}
            AND user_id = ?
            AND room_id = ?
            AND thread_id IS NULL
            ORDER BY stream_ordering DESC
            LIMIT 1
        """

        args.extend((user_id, room_id))
        txn.execute(sql, args)

        return cast(Optional[Tuple[str, int]], txn.fetchone())

    async def get_receipts_for_user(
        self, user_id: str, receipt_types: Iterable[str]
    ) -> Dict[str, str]:
        """
        Fetch the event IDs for the latest receipts sent by the given user.

        Args:
            user_id: The user to fetch receipts for.
            receipt_types: The receipt types to check.

        Returns:
            A map of room ID to the event ID of the latest receipt for that room.

            If the user has not sent a receipt to a room then it will not appear
            in the returned dictionary.
        """
        results = await self.get_receipts_for_user_with_orderings(
            user_id, receipt_types
        )

        # Reduce the result to room ID -> event ID.
        return {
            room_id: room_result["event_id"]
            for room_id, room_result in results.items()
        }

    async def get_receipts_for_user_with_orderings(
        self, user_id: str, receipt_types: Iterable[str]
    ) -> JsonDict:
        """
        Fetch receipts for all rooms that the given user is joined to.

        Args:
            user_id: The user to fetch receipts for.
            receipt_types: The receipt types to fetch. Earlier receipt types
                are given priority if multiple receipts point to the same event.

        Returns:
            A map of room ID to the latest receipt (for the given types).
        """
        results: JsonDict = {}
        for receipt_type in receipt_types:
            partial_result = await self._get_receipts_for_user_with_orderings(
                user_id, receipt_type
            )
            for room_id, room_result in partial_result.items():
                # If the room has not yet been seen, or the receipt is newer,
                # use it.
                if (
                    room_id not in results
                    or results[room_id]["stream_ordering"]
                    < room_result["stream_ordering"]
                ):
                    results[room_id] = room_result

        return results

    @cached()
    async def _get_receipts_for_user_with_orderings(
        self, user_id: str, receipt_type: str
    ) -> JsonDict:
        """
        Fetch receipts for all rooms that the given user is joined to.

        Args:
            user_id: The user to fetch receipts for.
            receipt_type: The receipt type to fetch.

        Returns:
            A map of room ID to the latest receipt information.
        """

        def f(txn: LoggingTransaction) -> List[Tuple[str, str, int, int]]:
            sql = (
                "SELECT rl.room_id, rl.event_id,"
                " e.topological_ordering, e.stream_ordering"
                " FROM receipts_linearized AS rl"
                " INNER JOIN events AS e USING (room_id, event_id)"
                " WHERE rl.room_id = e.room_id"
                " AND rl.event_id = e.event_id"
                " AND user_id = ?"
                " AND receipt_type = ?"
            )
            txn.execute(sql, (user_id, receipt_type))
            return cast(List[Tuple[str, str, int, int]], txn.fetchall())

        rows = await self.db_pool.runInteraction(
            "get_receipts_for_user_with_orderings", f
        )
        return {
            row[0]: {
                "event_id": row[1],
                "topological_ordering": row[2],
                "stream_ordering": row[3],
            }
            for row in rows
        }

    async def get_linearized_receipts_for_rooms(
        self, room_ids: Iterable[str], to_key: int, from_key: Optional[int] = None
    ) -> List[dict]:
        """Get receipts for multiple rooms for sending to clients.

        Args:
            room_ids: The room IDs to fetch receipts for.
            to_key: Max stream id to fetch receipts up to.
            from_key: Min stream id to fetch receipts from. None fetches
                from the start.

        Returns:
            A list of receipts.
        """
        room_ids = set(room_ids)

        if from_key is not None:
            # Only ask the database about rooms where there have been new
            # receipts added since `from_key`.
            room_ids = self._receipts_stream_cache.get_entities_changed(
                room_ids, from_key
            )

        results = await self._get_linearized_receipts_for_rooms(
            room_ids, to_key, from_key=from_key
        )

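        # Flatten the per-room result lists into a single list of receipt EDUs.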
        return [ev for res in results.values() for ev in res]

    async def get_linearized_receipts_for_room(
        self, room_id: str, to_key: int, from_key: Optional[int] = None
    ) -> Sequence[JsonDict]:
        """Get receipts for a single room for sending to clients.

        Args:
            room_id: The room ID.
            to_key: Max stream id to fetch receipts up to.
            from_key: Min stream id to fetch receipts from. None fetches
                from the start.

        Returns:
            A list of receipts.
        """
        if from_key is not None:
            # Check the cache first to see if any new receipts have been added
            # since `from_key`. If not, we can no-op.
            if not self._receipts_stream_cache.has_entity_changed(room_id, from_key):
                return []

        return await self._get_linearized_receipts_for_room(room_id, to_key, from_key)

    @cached(tree=True)
    async def _get_linearized_receipts_for_room(
        self, room_id: str, to_key: int, from_key: Optional[int] = None
    ) -> Sequence[JsonDict]:
        """See get_linearized_receipts_for_room"""

        def f(txn: LoggingTransaction) -> List[Dict[str, Any]]:
            if from_key:
                sql = (
                    "SELECT * FROM receipts_linearized WHERE"
                    " room_id = ? AND stream_id > ? AND stream_id <= ?"
                )
                txn.execute(sql, (room_id, from_key, to_key))
            else:
                sql = (
                    "SELECT * FROM receipts_linearized WHERE"
                    " room_id = ? AND stream_id <= ?"
                )
                txn.execute(sql, (room_id, to_key))

            return self.db_pool.cursor_to_dict(txn)

        rows = await self.db_pool.runInteraction("get_linearized_receipts_for_room", f)

        if not rows:
            return []

        content: JsonDict = {}
        for row in rows:
            content.setdefault(row["event_id"], {}).setdefault(row["receipt_type"], {})[
                row["user_id"]
            ] = db_to_json(row["data"])

        return [{"type": EduTypes.RECEIPT, "room_id": room_id, "content": content}]

    @cachedList(
        cached_method_name="_get_linearized_receipts_for_room",
        list_name="room_ids",
        num_args=3,
    )
    async def _get_linearized_receipts_for_rooms(
        self, room_ids: Collection[str], to_key: int, from_key: Optional[int] = None
    ) -> Dict[str, Sequence[JsonDict]]:
        if not room_ids:
            return {}

        def f(txn: LoggingTransaction) -> List[Dict[str, Any]]:
            if from_key:
                sql = """
                    SELECT * FROM receipts_linearized WHERE
                    stream_id > ? AND stream_id <= ? AND
                """
                clause, args = make_in_list_sql_clause(
                    self.database_engine, "room_id", room_ids
                )
                txn.execute(sql + clause, [from_key, to_key] + list(args))
            else:
                sql = """
                    SELECT * FROM receipts_linearized WHERE
                    stream_id <= ? AND
                """
                clause, args = make_in_list_sql_clause(
                    self.database_engine, "room_id", room_ids
                )
                txn.execute(sql + clause, [to_key] + list(args))

            return self.db_pool.cursor_to_dict(txn)

        txn_results = await self.db_pool.runInteraction(
            "_get_linearized_receipts_for_rooms", f
        )

        results: JsonDict = {}
        for row in txn_results:
            # We want a single event per room, since we want to batch the
            # receipts by room, event and type.
            room_event = results.setdefault(
                row["room_id"],
                {"type": EduTypes.RECEIPT, "room_id": row["room_id"], "content": {}},
            )

            # The content is of the form:
            # {"$foo:bar": { "read": { "@user:host": <receipt> }, .. }, .. }
            event_entry = room_event["content"].setdefault(row["event_id"], {})
            receipt_type = event_entry.setdefault(row["receipt_type"], {})

            receipt_type[row["user_id"]] = db_to_json(row["data"])
            if row["thread_id"]:
                receipt_type[row["user_id"]]["thread_id"] = row["thread_id"]

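        # Batch the results so that every requested room has an entry, using an
        # empty list for rooms with no receipts in the requested range.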
        results = {
            room_id: [results[room_id]] if room_id in results else []
            for room_id in room_ids
        }
        return results

    @cached(
        num_args=2,
    )
    async def get_linearized_receipts_for_all_rooms(
        self, to_key: int, from_key: Optional[int] = None
    ) -> Mapping[str, JsonDict]:
        """Get receipts for all rooms between two stream_ids, up
        to a limit of the latest 100 read receipts.

        Args:
            to_key: Max stream id to fetch receipts up to.
            from_key: Min stream id to fetch receipts from. None fetches
                from the start.

        Returns:
            A dictionary of room IDs to a list of receipts.
        """

        def f(txn: LoggingTransaction) -> List[Dict[str, Any]]:
            if from_key:
                sql = """
                    SELECT * FROM receipts_linearized WHERE
                    stream_id > ? AND stream_id <= ?
                    ORDER BY stream_id DESC
                    LIMIT 100
                """
                txn.execute(sql, [from_key, to_key])
            else:
                sql = """
                    SELECT * FROM receipts_linearized WHERE
                    stream_id <= ?
                    ORDER BY stream_id DESC
                    LIMIT 100
                """
                txn.execute(sql, [to_key])

            return self.db_pool.cursor_to_dict(txn)

        txn_results = await self.db_pool.runInteraction(
            "get_linearized_receipts_for_all_rooms", f
        )

        results: JsonDict = {}
        for row in txn_results:
            # We want a single event per room, since we want to batch the
            # receipts by room, event and type.
            room_event = results.setdefault(
                row["room_id"],
                {"type": EduTypes.RECEIPT, "room_id": row["room_id"], "content": {}},
            )

            # The content is of the form:
            # {"$foo:bar": { "read": { "@user:host": <receipt> }, .. }, .. }
            event_entry = room_event["content"].setdefault(row["event_id"], {})
            receipt_type = event_entry.setdefault(row["receipt_type"], {})

            receipt_type[row["user_id"]] = db_to_json(row["data"])

        return results

    async def get_users_sent_receipts_between(
        self, last_id: int, current_id: int
    ) -> List[str]:
        """Get all users who sent receipts between `last_id` exclusive and
        `current_id` inclusive.

        Returns:
            The list of users.
        """

        if last_id == current_id:
            return []

        def _get_users_sent_receipts_between_txn(txn: LoggingTransaction) -> List[str]:
            sql = """
                SELECT DISTINCT user_id FROM receipts_linearized
                WHERE ? < stream_id AND stream_id <= ?
            """
            txn.execute(sql, (last_id, current_id))

            return [r[0] for r in txn]

        return await self.db_pool.runInteraction(
            "get_users_sent_receipts_between", _get_users_sent_receipts_between_txn
        )

    async def get_all_updated_receipts(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> Tuple[
        List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]], int, bool
    ]:
        """Get updates for receipts replication stream.

        Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.

        Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exist
            between the requested tokens due to the limit.

            The token returned can be used in a subsequent call to this
            function to get further updates.

            The updates are a list of 2-tuples of stream ID and the row data.
        """
        if last_id == current_id:
            return [], current_id, False

        def get_all_updated_receipts_txn(
            txn: LoggingTransaction,
        ) -> Tuple[
            List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]],
            int,
            bool,
        ]:
            sql = """
                SELECT stream_id, room_id, receipt_type, user_id, event_id, thread_id, data
                FROM receipts_linearized
                WHERE ? < stream_id AND stream_id <= ?
                ORDER BY stream_id ASC
                LIMIT ?
            """
            txn.execute(sql, (last_id, current_id, limit))

            updates = cast(
                List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]],
                [(r[0], r[1:6] + (db_to_json(r[6]),)) for r in txn],
            )

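            # If we hit the limit there may be more rows between the requested
            # tokens; report the stream ID of the last row returned as the new
            # upper bound so the next call can resume from there.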
            limited = False
            upper_bound = current_id

            if len(updates) == limit:
                limited = True
                upper_bound = updates[-1][0]

            return updates, upper_bound, limited

        return await self.db_pool.runInteraction(
            "get_all_updated_receipts", get_all_updated_receipts_txn
        )

    def invalidate_caches_for_receipt(
        self, room_id: str, receipt_type: str, user_id: str
    ) -> None:
        self._get_receipts_for_user_with_orderings.invalidate((user_id, receipt_type))
        self._get_linearized_receipts_for_room.invalidate((room_id,))

        # We use this method to invalidate so that we don't end up with circular
        # dependencies between the receipts and push action stores.
        self._attempt_to_invalidate_cache(
            "get_unread_event_push_actions_by_room_for_user", (room_id,)
        )

    def process_replication_rows(
        self,
        stream_name: str,
        instance_name: str,
        token: int,
        rows: Iterable[Any],
    ) -> None:
        if stream_name == ReceiptsStream.NAME:
            self._receipts_id_gen.advance(instance_name, token)
            for row in rows:
                self.invalidate_caches_for_receipt(
                    row.room_id, row.receipt_type, row.user_id
                )
                self._receipts_stream_cache.entity_has_changed(row.room_id, token)

        return super().process_replication_rows(stream_name, instance_name, token, rows)

    def process_replication_position(
        self, stream_name: str, instance_name: str, token: int
    ) -> None:
        if stream_name == ReceiptsStream.NAME:
            self._receipts_id_gen.advance(instance_name, token)
        super().process_replication_position(stream_name, instance_name, token)

    def _insert_linearized_receipt_txn(
        self,
        txn: LoggingTransaction,
        room_id: str,
        receipt_type: str,
        user_id: str,
        event_id: str,
        thread_id: Optional[str],
        data: JsonDict,
        stream_id: int,
    ) -> Optional[int]:
        """Inserts a receipt into the database if it's newer than the current one.

        Returns:
            None if the receipt is older than the current receipt; otherwise,
            the rx timestamp of the event that the receipt corresponds to
            (or 0 if the event is unknown).
        """
        assert self._can_write_to_receipts

        res = self.db_pool.simple_select_one_txn(
            txn,
            table="events",
            retcols=["stream_ordering", "received_ts"],
            keyvalues={"event_id": event_id},
            allow_none=True,
        )

        stream_ordering = int(res["stream_ordering"]) if res else None
        rx_ts = res["received_ts"] if res else 0

        # We don't want to clobber receipts for more recent events, so we
        # have to compare orderings of existing receipts.
        if stream_ordering is not None:
            if thread_id is None:
                thread_clause = "r.thread_id IS NULL"
                thread_args: Tuple[str, ...] = ()
            else:
                thread_clause = "r.thread_id = ?"
                thread_args = (thread_id,)

            sql = f"""
                SELECT stream_ordering, event_id FROM events
                INNER JOIN receipts_linearized AS r USING (event_id, room_id)
                WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ? AND {thread_clause}
            """
            txn.execute(
                sql,
                (
                    room_id,
                    receipt_type,
                    user_id,
                )
                + thread_args,
            )

            for so, eid in txn:
                if int(so) >= stream_ordering:
                    logger.debug(
                        "Ignoring new receipt for %s in favour of existing "
                        "one for later event %s",
                        event_id,
                        eid,
                    )
                    return None

        txn.call_after(
            self.invalidate_caches_for_receipt, room_id, receipt_type, user_id
        )
        txn.call_after(
            self._receipts_stream_cache.entity_has_changed, room_id, stream_id
        )

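        # SQL equality never matches NULL (`thread_id = NULL` is always false),
        # so unthreaded receipts are matched with an explicit `thread_id IS NULL`
        # where clause instead of a key value.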
        keyvalues = {
            "room_id": room_id,
            "receipt_type": receipt_type,
            "user_id": user_id,
        }
        where_clause = ""
        if thread_id is None:
            where_clause = "thread_id IS NULL"
        else:
            keyvalues["thread_id"] = thread_id

        self.db_pool.simple_upsert_txn(
            txn,
            table="receipts_linearized",
            keyvalues=keyvalues,
            values={
                "stream_id": stream_id,
                "event_id": event_id,
                "event_stream_ordering": stream_ordering,
                "data": json_encoder.encode(data),
            },
            where_clause=where_clause,
        )

        return rx_ts

    def _graph_to_linear(
        self, txn: LoggingTransaction, room_id: str, event_ids: List[str]
    ) -> str:
        """
        Generate a linearized event from a list of events (i.e. a list of forward
        extremities in the room).

        This should allow for calculation of the correct read receipt even if
        servers have different event ordering.

        Args:
            txn: The transaction
            room_id: The room ID the events are in.
            event_ids: The list of event IDs to linearize.

        Returns:
            The linearized event ID.
        """
        # TODO: Make this better.
        clause, args = make_in_list_sql_clause(
            self.database_engine, "event_id", event_ids
        )

        sql = """
            SELECT event_id WHERE room_id = ? AND stream_ordering IN (
                SELECT max(stream_ordering) WHERE %s
            )
        """ % (
            clause,
        )

        txn.execute(sql, [room_id] + list(args))
        rows = txn.fetchall()
        if rows:
            return rows[0][0]
        else:
            raise RuntimeError("Unrecognized event_ids: %r" % (event_ids,))

    async def insert_receipt(
        self,
        room_id: str,
        receipt_type: str,
        user_id: str,
        event_ids: List[str],
        thread_id: Optional[str],
        data: dict,
    ) -> Optional[Tuple[int, int]]:
        """Insert a receipt, either from local client or remote server.

        Automatically does conversion between linearized and graph
        representations.

        Returns:
            The new receipts stream ID and token, if the receipt is newer than
            what was previously persisted. None, otherwise.
        """
        assert self._can_write_to_receipts

        if not event_ids:
            return None

        if len(event_ids) == 1:
            linearized_event_id = event_ids[0]
        else:
            # We need to convert the given points in the event graph to a
            # linearized form.
            linearized_event_id = await self.db_pool.runInteraction(
                "insert_receipt_conv", self._graph_to_linear, room_id, event_ids
            )

        async with self._receipts_id_gen.get_next() as stream_id:  # type: ignore[attr-defined]
            event_ts = await self.db_pool.runInteraction(
                "insert_linearized_receipt",
                self._insert_linearized_receipt_txn,
                room_id,
                receipt_type,
                user_id,
                linearized_event_id,
                thread_id,
                data,
                stream_id=stream_id,
                # Read committed is actually beneficial here because we check for a receipt with
                # greater stream order, and checking the very latest data at select time is better
                # than the data at transaction start time.
                isolation_level=IsolationLevel.READ_COMMITTED,
            )

        # If the receipt was older than the currently persisted one, nothing to do.
        if event_ts is None:
            return None

        now = self._clock.time_msec()
        logger.debug(
            "Receipt %s for event %s in %s (%i ms old)",
            receipt_type,
            linearized_event_id,
            room_id,
            now - event_ts,
        )

        await self.db_pool.runInteraction(
            "insert_graph_receipt",
            self._insert_graph_receipt_txn,
            room_id,
            receipt_type,
            user_id,
            event_ids,
            thread_id,
            data,
        )

        max_persisted_id = self._receipts_id_gen.get_current_token()

        return stream_id, max_persisted_id

    def _insert_graph_receipt_txn(
        self,
        txn: LoggingTransaction,
        room_id: str,
        receipt_type: str,
        user_id: str,
        event_ids: List[str],
        thread_id: Optional[str],
        data: JsonDict,
    ) -> None:
        assert self._can_write_to_receipts

        txn.call_after(
            self._get_receipts_for_user_with_orderings.invalidate,
            (user_id, receipt_type),
        )
        # FIXME: This shouldn't invalidate the whole cache
        txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))

        keyvalues = {
            "room_id": room_id,
            "receipt_type": receipt_type,
            "user_id": user_id,
        }
        where_clause = ""
        if thread_id is None:
            where_clause = "thread_id IS NULL"
        else:
            keyvalues["thread_id"] = thread_id

        self.db_pool.simple_upsert_txn(
            txn,
            table="receipts_graph",
            keyvalues=keyvalues,
            values={
                "event_ids": json_encoder.encode(event_ids),
                "data": json_encoder.encode(data),
            },
            where_clause=where_clause,
        )

class ReceiptsBackgroundUpdateStore(SQLBaseStore):
    POPULATE_RECEIPT_EVENT_STREAM_ORDERING = "populate_event_stream_ordering"
    RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME = "receipts_linearized_unique_index"
    RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME = "receipts_graph_unique_index"

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.db_pool.updates.register_background_update_handler(
            self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING,
            self._populate_receipt_event_stream_ordering,
        )
        self.db_pool.updates.register_background_update_handler(
            self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME,
            self._background_receipts_linearized_unique_index,
        )
        self.db_pool.updates.register_background_update_handler(
            self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME,
            self._background_receipts_graph_unique_index,
        )

    async def _populate_receipt_event_stream_ordering(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        def _populate_receipt_event_stream_ordering_txn(
            txn: LoggingTransaction,
        ) -> bool:
            if "max_stream_id" in progress:
                max_stream_id = progress["max_stream_id"]
            else:
                txn.execute("SELECT max(stream_id) FROM receipts_linearized")
                res = txn.fetchone()

                if res is None or res[0] is None:
                    return True
                else:
                    max_stream_id = res[0]

            start = progress.get("stream_id", 0)
            stop = start + batch_size

            sql = """
                UPDATE receipts_linearized
                SET event_stream_ordering = (
                    SELECT stream_ordering
                    FROM events
                    WHERE event_id = receipts_linearized.event_id
                )
                WHERE stream_id >= ? AND stream_id < ?
            """
            txn.execute(sql, (start, stop))

            self.db_pool.updates._background_update_progress_txn(
                txn,
                self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING,
                {
                    "stream_id": stop,
                    "max_stream_id": max_stream_id,
                },
            )

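            # The update is finished once this batch window has moved past the
            # highest stream_id that existed when the update began.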
            return stop > max_stream_id

        finished = await self.db_pool.runInteraction(
            "_populate_receipt_event_stream_ordering_txn",
            _populate_receipt_event_stream_ordering_txn,
        )

        if finished:
            await self.db_pool.updates._end_background_update(
                self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING
            )

        return batch_size

    async def _background_receipts_linearized_unique_index(
        self, progress: dict, batch_size: int
    ) -> int:
        """Removes duplicate receipts and adds a unique index on
        `(room_id, receipt_type, user_id)` to `receipts_linearized`, for non-thread
        receipts."""

        def _remove_duplicate_receipts_txn(txn: LoggingTransaction) -> None:
            if isinstance(self.database_engine, PostgresEngine):
                ROW_ID_NAME = "ctid"
            else:
                ROW_ID_NAME = "rowid"

            # Identify any duplicate receipts arising from
            # https://github.com/matrix-org/synapse/issues/14406.
            # The following query takes less than a minute on matrix.org.
            sql = """
                SELECT MAX(stream_id), room_id, receipt_type, user_id
                FROM receipts_linearized
                WHERE thread_id IS NULL
                GROUP BY room_id, receipt_type, user_id
                HAVING COUNT(*) > 1
            """
            txn.execute(sql)
            duplicate_keys = cast(List[Tuple[int, str, str, str]], list(txn))

            # Then remove duplicate receipts, keeping the one with the highest
            # `stream_id`. Since there might be duplicate rows with the same
            # `stream_id`, we delete by the row ID (ctid/rowid) instead.
            for stream_id, room_id, receipt_type, user_id in duplicate_keys:
                sql = f"""
                    SELECT {ROW_ID_NAME}
                    FROM receipts_linearized
                    WHERE
                        room_id = ? AND
                        receipt_type = ? AND
                        user_id = ? AND
                        thread_id IS NULL AND
                        stream_id = ?
                    LIMIT 1
                """
                txn.execute(sql, (room_id, receipt_type, user_id, stream_id))
                row_id = cast(Tuple[str], txn.fetchone())[0]

                sql = f"""
                    DELETE FROM receipts_linearized
                    WHERE
                        room_id = ? AND
                        receipt_type = ? AND
                        user_id = ? AND
                        thread_id IS NULL AND
                        {ROW_ID_NAME} != ?
                """
                txn.execute(sql, (room_id, receipt_type, user_id, row_id))

        await self.db_pool.runInteraction(
            self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME,
            _remove_duplicate_receipts_txn,
        )

        await self.db_pool.updates.create_index_in_background(
            index_name="receipts_linearized_unique_index",
            table="receipts_linearized",
            columns=["room_id", "receipt_type", "user_id"],
            where_clause="thread_id IS NULL",
            unique=True,
        )

        await self.db_pool.updates._end_background_update(
            self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME
        )

        return 1

    async def _background_receipts_graph_unique_index(
        self, progress: dict, batch_size: int
    ) -> int:
        """Removes duplicate receipts and adds a unique index on
        `(room_id, receipt_type, user_id)` to `receipts_graph`, for non-thread
        receipts."""

        def _remove_duplicate_receipts_txn(txn: LoggingTransaction) -> None:
            # Identify any duplicate receipts arising from
            # https://github.com/matrix-org/synapse/issues/14406.
            # We expect the following query to use the per-thread receipt index and take
            # less than a minute.
            sql = """
                SELECT room_id, receipt_type, user_id FROM receipts_graph
                WHERE thread_id IS NULL
                GROUP BY room_id, receipt_type, user_id
                HAVING COUNT(*) > 1
            """
            txn.execute(sql)
            duplicate_keys = cast(List[Tuple[str, str, str]], list(txn))

            # Then remove all duplicate receipts.
            # We could be clever and try to keep the latest receipt out of every set of
            # duplicates, but it's far simpler to remove them all.
            for room_id, receipt_type, user_id in duplicate_keys:
                sql = """
                    DELETE FROM receipts_graph
                    WHERE
                        room_id = ? AND
                        receipt_type = ? AND
                        user_id = ? AND
                        thread_id IS NULL
                """
                txn.execute(sql, (room_id, receipt_type, user_id))

        await self.db_pool.runInteraction(
            self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME,
            _remove_duplicate_receipts_txn,
        )

        await self.db_pool.updates.create_index_in_background(
            index_name="receipts_graph_unique_index",
            table="receipts_graph",
            columns=["room_id", "receipt_type", "user_id"],
            where_clause="thread_id IS NULL",
            unique=True,
        )

        await self.db_pool.updates._end_background_update(
            self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME
        )

        return 1

class ReceiptsStore(ReceiptsWorkerStore, ReceiptsBackgroundUpdateStore):
    pass