# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
# Copyright 2019,2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import (
    TYPE_CHECKING,
    Any,
    Collection,
    Dict,
    Iterable,
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    Union,
    cast,
    overload,
)

import attr
from canonicaljson import encode_canonical_json
from typing_extensions import Literal

from synapse.api.constants import DeviceKeyAlgorithms
from synapse.appservice import (
    TransactionOneTimeKeysCount,
    TransactionUnusedFallbackKeys,
)
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.replication.tcp.streams._base import DeviceListsStream
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
    make_in_list_sql_clause,
    make_tuple_in_list_sql_clause,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import StreamIdGenerator
from synapse.types import JsonDict, JsonMapping
from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter

if TYPE_CHECKING:
    from synapse.handlers.e2e_keys import SignatureListItem
    from synapse.server import HomeServer


@attr.s(slots=True, auto_attribs=True)
class DeviceKeyLookupResult:
    """The type returned by get_e2e_device_keys_and_signatures"""

    display_name: Optional[str]

    # the key data from e2e_device_keys_json. Typically includes fields like
    # "algorithm", "keys" (including the curve25519 identity key and the ed25519 signing
    # key) and "signatures" (a map from (user id) to (key id/device_id) to signature.)
    keys: Optional[JsonDict]


class EndToEndKeyBackgroundStore(SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.db_pool.updates.register_background_index_update(
            "e2e_cross_signing_keys_idx",
            index_name="e2e_cross_signing_keys_stream_idx",
            table="e2e_cross_signing_keys",
            columns=["stream_id"],
            unique=True,
        )


class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorkerStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self._allow_device_name_lookup_over_federation = (
            self.hs.config.federation.allow_device_name_lookup_over_federation
        )

    def process_replication_rows(
        self,
        stream_name: str,
        instance_name: str,
        token: int,
        rows: Iterable[Any],
    ) -> None:
        if stream_name == DeviceListsStream.NAME:
            for row in rows:
                assert isinstance(row, DeviceListsStream.DeviceListsStreamRow)
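                # Note: `entity` may be either a user ID or a remote destination;
                # only user IDs (which start with "@") can have an entry in this
                # per-user federation-query cache.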
                if row.entity.startswith("@"):
                    self._get_e2e_device_keys_for_federation_query_inner.invalidate(
                        (row.entity,)
                    )

        super().process_replication_rows(stream_name, instance_name, token, rows)

    async def get_e2e_device_keys_for_federation_query(
        self, user_id: str
    ) -> Tuple[int, Sequence[JsonMapping]]:
        """Get all devices (with any device keys) for a user

        Returns:
            (stream_id, devices)
        """
        now_stream_id = self.get_device_stream_token()

        # We need to be careful with the caching here, as we need to always
        # return *all* persisted devices, however there may be a lag between a
        # new device being persisted and the cache being invalidated.
        cached_results = (
            self._get_e2e_device_keys_for_federation_query_inner.cache.get_immediate(
                user_id, None
            )
        )
        if cached_results is not None:
            # Check that there have been no new devices added by another worker
            # after the cache. This should be quick as there should be few rows
            # with a higher stream ordering.
            #
            # Note that we invalidate based on the device stream, so we only
            # have to check for potential invalidations after the
            # `now_stream_id`.
            sql = """
                SELECT user_id FROM device_lists_stream
                WHERE stream_id >= ? AND user_id = ?
            """
            rows = await self.db_pool.execute(
                "get_e2e_device_keys_for_federation_query_check",
                None,
                sql,
                now_stream_id,
                user_id,
            )
            if not rows:
                # No new rows, so cache is still valid.
                return now_stream_id, cached_results

            # There have been changes, so invalidate the cache and run the query.
            self._get_e2e_device_keys_for_federation_query_inner.invalidate((user_id,))

        results = await self._get_e2e_device_keys_for_federation_query_inner(user_id)

        return now_stream_id, results

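    # `iterable=True` below makes the cache account for the length of the returned
    # sequence when tracking its size, rather than treating each result as a single
    # entry.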
    @cached(iterable=True)
    async def _get_e2e_device_keys_for_federation_query_inner(
        self, user_id: str
    ) -> Sequence[JsonMapping]:
        """Get all devices (with any device keys) for a user"""

        devices = await self.get_e2e_device_keys_and_signatures([(user_id, None)])

        if devices:
            user_devices = devices[user_id]
            results = []
            for device_id, device in user_devices.items():
                result: JsonDict = {"device_id": device_id}

                keys = device.keys
                if keys:
                    result["keys"] = keys

                device_display_name = None
                if self._allow_device_name_lookup_over_federation:
                    device_display_name = device.display_name
                if device_display_name:
                    result["device_display_name"] = device_display_name

                results.append(result)

            return results

        return []

    @trace
    @cancellable
    async def get_e2e_device_keys_for_cs_api(
        self,
        query_list: Collection[Tuple[str, Optional[str]]],
        include_displaynames: bool = True,
    ) -> Dict[str, Dict[str, JsonDict]]:
        """Fetch a list of device keys, formatted suitably for the C/S API.

        Args:
            query_list: List of pairs of user_ids and device_ids.
            include_displaynames: Whether to include the displayname of returned devices
                (if one exists).

        Returns:
            Dict mapping from user-id to dict mapping from device_id to
            key data. The key data will be a dict in the same format as the
            DeviceKeys type returned by POST /_matrix/client/r0/keys/query.
        """
        set_tag("query_list", str(query_list))
        if not query_list:
            return {}

        results = await self.get_e2e_device_keys_and_signatures(query_list)

        # Build the result structure, un-jsonify the results, and add the
        # "unsigned" section
        rv: Dict[str, Dict[str, JsonDict]] = {}
        for user_id, device_keys in results.items():
            rv[user_id] = {}
            for device_id, device_info in device_keys.items():
                r = device_info.keys
                if r is None:
                    continue

                r["unsigned"] = {}
                if include_displaynames:
                    # Include the device's display name in the "unsigned" dictionary
                    display_name = device_info.display_name
                    if display_name is not None:
                        r["unsigned"]["device_display_name"] = display_name

                rv[user_id][device_id] = r

        return rv

    @overload
    async def get_e2e_device_keys_and_signatures(
        self,
        query_list: Collection[Tuple[str, Optional[str]]],
        include_all_devices: Literal[False] = False,
    ) -> Dict[str, Dict[str, DeviceKeyLookupResult]]:
        ...

    @overload
    async def get_e2e_device_keys_and_signatures(
        self,
        query_list: Collection[Tuple[str, Optional[str]]],
        include_all_devices: bool = False,
        include_deleted_devices: Literal[False] = False,
    ) -> Dict[str, Dict[str, DeviceKeyLookupResult]]:
        ...

    @overload
    async def get_e2e_device_keys_and_signatures(
        self,
        query_list: Collection[Tuple[str, Optional[str]]],
        include_all_devices: Literal[True],
        include_deleted_devices: Literal[True],
    ) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]:
        ...

    @trace
    @cancellable
    async def get_e2e_device_keys_and_signatures(
        self,
        query_list: Collection[Tuple[str, Optional[str]]],
        include_all_devices: bool = False,
        include_deleted_devices: bool = False,
    ) -> Union[
        Dict[str, Dict[str, DeviceKeyLookupResult]],
        Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]],
    ]:
        """Fetch a list of device keys

        Any cross-signatures made on the keys by the owner of the device are also
        included.

        The cross-signatures are added to the `signatures` field within the `keys`
        object in the response.

        Args:
            query_list: List of pairs of user_ids and device_ids. Device id can be None
                to indicate "all devices for this user"
            include_all_devices: whether to return devices without device keys
            include_deleted_devices: whether to include null entries for
                devices which no longer exist (but were in the query_list).
                This option only takes effect if include_all_devices is true.

        Returns:
            Dict mapping from user-id to dict mapping from device_id to
            key data.
        """
        set_tag("include_all_devices", include_all_devices)
        set_tag("include_deleted_devices", include_deleted_devices)

        result = await self._get_e2e_device_keys(
            query_list,
            include_all_devices,
            include_deleted_devices,
        )

        # get the (user_id, device_id) tuples to look up cross-signatures for
        signature_query = (
            (user_id, device_id)
            for user_id, dev in result.items()
            for device_id, d in dev.items()
            if d is not None and d.keys is not None
        )

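        # Look the signatures up in batches of 50 devices, which keeps each query's
        # bind-parameter list small.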
        for batch in batch_iter(signature_query, 50):
            cross_sigs_result = await self.db_pool.runInteraction(
                "get_e2e_cross_signing_signatures_for_devices",
                self._get_e2e_cross_signing_signatures_for_devices_txn,
                batch,
            )

            # add each cross-signing signature to the correct device in the result dict.
            for user_id, key_id, device_id, signature in cross_sigs_result:
                target_device_result = result[user_id][device_id]
                # We've only looked up cross-signatures for non-deleted devices with key
                # data.
                assert target_device_result is not None
                assert target_device_result.keys is not None
                target_device_signatures = target_device_result.keys.setdefault(
                    "signatures", {}
                )
                signing_user_signatures = target_device_signatures.setdefault(
                    user_id, {}
                )
                signing_user_signatures[key_id] = signature

        log_kv(result)
        return result

    async def _get_e2e_device_keys(
        self,
        query_list: Collection[Tuple[str, Optional[str]]],
        include_all_devices: bool = False,
        include_deleted_devices: bool = False,
    ) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]:
        """Get information on devices from the database

        The results include the device's keys and self-signatures, but *not* any
        cross-signing signatures which have been added subsequently (for which, see
        get_e2e_device_keys_and_signatures)
        """
        query_clauses: List[str] = []
        query_params_list: List[List[object]] = []

        if include_all_devices is False:
            include_deleted_devices = False

        if include_deleted_devices:
            deleted_devices = set(query_list)

        # Split the query list into queries for users and queries for particular
        # devices.
        user_list = []
        user_device_list = []
        for user_id, device_id in query_list:
            if device_id is None:
                user_list.append(user_id)
            else:
                user_device_list.append((user_id, device_id))

        if user_list:
            user_id_in_list_clause, user_args = make_in_list_sql_clause(
                self.database_engine, "user_id", user_list
            )
            query_clauses.append(user_id_in_list_clause)
            query_params_list.append(user_args)

        if user_device_list:
            # Divide the device queries into batches, to avoid excessively large
            # queries.
            for user_device_batch in batch_iter(user_device_list, 1024):
                (
                    user_device_id_in_list_clause,
                    user_device_args,
                ) = make_tuple_in_list_sql_clause(
                    self.database_engine, ("user_id", "device_id"), user_device_batch
                )
                query_clauses.append(user_device_id_in_list_clause)
                query_params_list.append(user_device_args)

        result: Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]] = {}

        def get_e2e_device_keys_txn(
            txn: LoggingTransaction, query_clause: str, query_params: list
        ) -> None:
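            # A LEFT join keeps devices that have no uploaded key data (needed when
            # include_all_devices is set); an INNER join drops them.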
            sql = (
                "SELECT user_id, device_id, "
                " d.display_name, "
                " k.key_json"
                " FROM devices d"
                " %s JOIN e2e_device_keys_json k USING (user_id, device_id)"
                " WHERE %s AND NOT d.hidden"
            ) % (
                "LEFT" if include_all_devices else "INNER",
                query_clause,
            )

            txn.execute(sql, query_params)

            for user_id, device_id, display_name, key_json in txn:
                assert device_id is not None
                if include_deleted_devices:
                    deleted_devices.remove((user_id, device_id))
                result.setdefault(user_id, {})[device_id] = DeviceKeyLookupResult(
                    display_name, db_to_json(key_json) if key_json else None
                )

        for query_clause, query_params in zip(query_clauses, query_params_list):
            await self.db_pool.runInteraction(
                "_get_e2e_device_keys",
                get_e2e_device_keys_txn,
                query_clause,
                query_params,
            )

        if include_deleted_devices:
            for user_id, device_id in deleted_devices:
                if device_id is None:
                    continue
                result.setdefault(user_id, {})[device_id] = None

        return result

    def _get_e2e_cross_signing_signatures_for_devices_txn(
        self, txn: LoggingTransaction, device_query: Iterable[Tuple[str, str]]
    ) -> List[Tuple[str, str, str, str]]:
        """Get cross-signing signatures for a given list of devices

        Returns signatures made by the owners of the devices.

        Returns: a list of results; each entry in the list is a tuple of
            (user_id, key_id, target_device_id, signature).
        """
        signature_query_clauses = []
        signature_query_params = []

        for user_id, device_id in device_query:
            signature_query_clauses.append(
                "target_user_id = ? AND target_device_id = ? AND user_id = ?"
            )
            signature_query_params.extend([user_id, device_id, user_id])

        signature_sql = """
            SELECT user_id, key_id, target_device_id, signature
            FROM e2e_cross_signing_signatures WHERE %s
            """ % (
            " OR ".join("(" + q + ")" for q in signature_query_clauses)
        )
        txn.execute(signature_sql, signature_query_params)
        return cast(
            List[
                Tuple[
                    str,
                    str,
                    str,
                    str,
                ]
            ],
            txn.fetchall(),
        )

    async def get_e2e_one_time_keys(
        self, user_id: str, device_id: str, key_ids: List[str]
    ) -> Dict[Tuple[str, str], str]:
        """Retrieve a number of one-time keys for a user

        Args:
            user_id: id of user to get keys for
            device_id: id of device to get keys for
            key_ids: list of key ids (excluding algorithm) to retrieve

        Returns:
            A map from (algorithm, key_id) to json string for key
        """
        rows = await self.db_pool.simple_select_many_batch(
            table="e2e_one_time_keys_json",
            column="key_id",
            iterable=key_ids,
            retcols=("algorithm", "key_id", "key_json"),
            keyvalues={"user_id": user_id, "device_id": device_id},
            desc="add_e2e_one_time_keys_check",
        )
        result = {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows}
        log_kv({"message": "Fetched one time keys for user", "one_time_keys": result})
        return result

    async def add_e2e_one_time_keys(
        self,
        user_id: str,
        device_id: str,
        time_now: int,
        new_keys: Iterable[Tuple[str, str, str]],
    ) -> None:
        """Insert some new one time keys for a device. Errors if any of the
        keys already exist.

        Args:
            user_id: id of user to get keys for
            device_id: id of device to get keys for
            time_now: insertion time to record (ms since epoch)
            new_keys: keys to add - each a tuple of (algorithm, key_id, key json)
        """
        await self.db_pool.runInteraction(
            "add_e2e_one_time_keys_insert",
            self._add_e2e_one_time_keys_txn,
            user_id,
            device_id,
            time_now,
            new_keys,
        )

    def _add_e2e_one_time_keys_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_id: str,
        time_now: int,
        new_keys: Iterable[Tuple[str, str, str]],
    ) -> None:
        """Insert some new one time keys for a device. Errors if any of the keys already exist.

        Args:
            user_id: id of user to get keys for
            device_id: id of device to get keys for
            time_now: insertion time to record (ms since epoch)
            new_keys: keys to add - each a tuple of (algorithm, key_id, key json) - note
                that the key JSON must be in canonical JSON form
        """
        set_tag("user_id", user_id)
        set_tag("device_id", device_id)
        set_tag("new_keys", str(new_keys))
        # We are protected from race between lookup and insertion due to
        # a unique constraint. If there is a race of two calls to
        # `add_e2e_one_time_keys` then they'll conflict and we will only
        # insert one set.
        self.db_pool.simple_insert_many_txn(
            txn,
            table="e2e_one_time_keys_json",
            keys=(
                "user_id",
                "device_id",
                "algorithm",
                "key_id",
                "ts_added_ms",
                "key_json",
            ),
            values=[
                (user_id, device_id, algorithm, key_id, time_now, json_bytes)
                for algorithm, key_id, json_bytes in new_keys
            ],
        )
        self._invalidate_cache_and_stream(
            txn, self.count_e2e_one_time_keys, (user_id, device_id)
        )

    @cached(max_entries=10000)
    async def count_e2e_one_time_keys(
        self, user_id: str, device_id: str
    ) -> Mapping[str, int]:
        """Count the number of one time keys the server has for a device

        Returns:
            A mapping from algorithm to number of keys for that algorithm.
        """

        def _count_e2e_one_time_keys(txn: LoggingTransaction) -> Dict[str, int]:
            sql = (
                "SELECT algorithm, COUNT(key_id) FROM e2e_one_time_keys_json"
                " WHERE user_id = ? AND device_id = ?"
                " GROUP BY algorithm"
            )
            txn.execute(sql, (user_id, device_id))

            # Initially set the key count to 0. This ensures that the client will always
            # receive *some* count, even if it's 0.
            result = {DeviceKeyAlgorithms.SIGNED_CURVE25519: 0}

            # Override entries with the count of any keys we pulled from the database
            for algorithm, key_count in txn:
                result[algorithm] = key_count

            return result

        return await self.db_pool.runInteraction(
            "count_e2e_one_time_keys", _count_e2e_one_time_keys
        )

    async def count_bulk_e2e_one_time_keys_for_as(
        self, user_ids: Collection[str]
    ) -> TransactionOneTimeKeysCount:
        """
        Counts, in bulk, the one-time keys for all the users specified.
        Intended to be used by application services for populating OTK counts in
        transactions.

        Return structure is of the shape:
            user_id -> device_id -> algorithm -> count
            Empty algorithm -> count dicts are created if needed to represent a
            lack of unused one-time keys.
        """

        def _count_bulk_e2e_one_time_keys_txn(
            txn: LoggingTransaction,
        ) -> TransactionOneTimeKeysCount:
            user_in_where_clause, user_parameters = make_in_list_sql_clause(
                self.database_engine, "user_id", user_ids
            )
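            # The LEFT JOIN means devices with no one-time keys still produce a row,
            # with a NULL algorithm and a COUNT of 0.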
            sql = f"""
                SELECT user_id, device_id, algorithm, COUNT(key_id)
                FROM devices
                LEFT JOIN e2e_one_time_keys_json USING (user_id, device_id)
                WHERE {user_in_where_clause}
                GROUP BY user_id, device_id, algorithm
            """
            txn.execute(sql, user_parameters)

            result: TransactionOneTimeKeysCount = {}

            for user_id, device_id, algorithm, count in txn:
                # We deliberately construct empty dictionaries for
                # users and devices without any unused one-time keys.
                # We *could* omit these empty dicts if there have been no
                # changes since the last transaction, but we currently don't
                # do any change tracking!
                device_count_by_algo = result.setdefault(user_id, {}).setdefault(
                    device_id, {}
                )
                if algorithm is not None:
                    # algorithm will be None if this device has no keys.
                    device_count_by_algo[algorithm] = count

            return result

        return await self.db_pool.runInteraction(
            "count_bulk_e2e_one_time_keys", _count_bulk_e2e_one_time_keys_txn
        )

    async def get_e2e_bulk_unused_fallback_key_types(
        self, user_ids: Collection[str]
    ) -> TransactionUnusedFallbackKeys:
        """
        Finds, in bulk, the types of unused fallback keys for all the users specified.
        Intended to be used by application services for populating unused fallback
        keys in transactions.

        Return structure is of the shape:
            user_id -> device_id -> algorithms
            Empty lists are created for devices if there are no unused fallback
            keys. This matches the response structure of MSC3202.
        """
        if len(user_ids) == 0:
            return {}

        def _get_bulk_e2e_unused_fallback_keys_txn(
            txn: LoggingTransaction,
        ) -> TransactionUnusedFallbackKeys:
            user_in_where_clause, user_parameters = make_in_list_sql_clause(
                self.database_engine, "devices.user_id", user_ids
            )
            # We can't use USING here because we require the `.used` condition
            # to be part of the JOIN condition so that we generate empty lists
            # when all keys are used (as opposed to just when there are no keys at all).
            sql = f"""
                SELECT devices.user_id, devices.device_id, algorithm
                FROM devices
                LEFT JOIN e2e_fallback_keys_json AS fallback_keys
                    ON devices.user_id = fallback_keys.user_id
                    AND devices.device_id = fallback_keys.device_id
                    AND NOT fallback_keys.used
                WHERE
                    {user_in_where_clause}
            """
            txn.execute(sql, user_parameters)

            result: TransactionUnusedFallbackKeys = {}

            for user_id, device_id, algorithm in txn:
                # We deliberately construct empty dictionaries and lists for
                # users and devices without any unused fallback keys.
                # We *could* omit these empty dicts if there have been no
                # changes since the last transaction, but we currently don't
                # do any change tracking!
                device_unused_keys = result.setdefault(user_id, {}).setdefault(
                    device_id, []
                )
                if algorithm is not None:
                    # algorithm will be None if this device has no keys.
                    device_unused_keys.append(algorithm)

            return result

        return await self.db_pool.runInteraction(
            "_get_bulk_e2e_unused_fallback_keys", _get_bulk_e2e_unused_fallback_keys_txn
        )

    async def set_e2e_fallback_keys(
        self, user_id: str, device_id: str, fallback_keys: JsonDict
    ) -> None:
        """Set the user's e2e fallback keys.

        Args:
            user_id: the user whose keys are being set
            device_id: the device whose keys are being set
            fallback_keys: the keys to set. This is a map from key ID (which is
                of the form "algorithm:id") to key data.
        """
        await self.db_pool.runInteraction(
            "set_e2e_fallback_keys_txn",
            self._set_e2e_fallback_keys_txn,
            user_id,
            device_id,
            fallback_keys,
        )
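        # Invalidate the local cache and stream the invalidation to other workers,
        # so they also pick up the new fallback key types.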
        await self.invalidate_cache_and_stream(
            "get_e2e_unused_fallback_key_types", (user_id, device_id)
        )

    def _set_e2e_fallback_keys_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_id: str,
        fallback_keys: JsonDict,
    ) -> None:
        """Set the user's e2e fallback keys.

        Args:
            user_id: the user whose keys are being set
            device_id: the device whose keys are being set
            fallback_keys: the keys to set. This is a map from key ID (which is
                of the form "algorithm:id") to key data.
        """
        # fallback_keys will usually only have one item in it, so using a for
        # loop (as opposed to calling simple_upsert_many_txn) won't be too bad
        # FIXME: make sure that only one key per algorithm is uploaded
        for key_id, fallback_key in fallback_keys.items():
            algorithm, key_id = key_id.split(":", 1)
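            # Note that `key_id` is rebound here: it now holds only the portion after
            # the colon, while `algorithm` holds the prefix.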
            old_key_json = self.db_pool.simple_select_one_onecol_txn(
                txn,
                table="e2e_fallback_keys_json",
                keyvalues={
                    "user_id": user_id,
                    "device_id": device_id,
                    "algorithm": algorithm,
                },
                retcol="key_json",
                allow_none=True,
            )

            new_key_json = encode_canonical_json(fallback_key).decode("utf-8")

            # If the uploaded key is the same as the current fallback key,
            # don't do anything. This prevents marking the key as unused if it
            # was already used.
            if old_key_json != new_key_json:
                self.db_pool.simple_upsert_txn(
                    txn,
                    table="e2e_fallback_keys_json",
                    keyvalues={
                        "user_id": user_id,
                        "device_id": device_id,
                        "algorithm": algorithm,
                    },
                    values={
                        "key_id": key_id,
                        "key_json": json_encoder.encode(fallback_key),
                        "used": False,
                    },
                )

    @cached(max_entries=10000)
    async def get_e2e_unused_fallback_key_types(
        self, user_id: str, device_id: str
    ) -> Sequence[str]:
        """Returns the fallback key types that have an unused key.

        Args:
            user_id: the user whose keys are being queried
            device_id: the device whose keys are being queried

        Returns:
            a list of key types
        """
        return await self.db_pool.simple_select_onecol(
            "e2e_fallback_keys_json",
            keyvalues={"user_id": user_id, "device_id": device_id, "used": False},
            retcol="algorithm",
            desc="get_e2e_unused_fallback_key_types",
        )

    async def get_e2e_cross_signing_key(
        self, user_id: str, key_type: str, from_user_id: Optional[str] = None
    ) -> Optional[JsonMapping]:
        """Returns a user's cross-signing key.

        Args:
            user_id: the user whose key is being requested
            key_type: the type of key that is being requested: either 'master'
                for a master key, 'self_signing' for a self-signing key, or
                'user_signing' for a user-signing key
            from_user_id: if specified, signatures made by this user on
                the self-signing key will be included in the result

        Returns:
            dict of the key data or None if not found
        """
        res = await self.get_e2e_cross_signing_keys_bulk([user_id], from_user_id)
        user_keys = res.get(user_id)
        if not user_keys:
            return None
        return user_keys.get(key_type)

    @cached(num_args=1)
    def _get_bare_e2e_cross_signing_keys(
        self, user_id: str
    ) -> Mapping[str, JsonMapping]:
        """Dummy function. Only used to make a cache for
        _get_bare_e2e_cross_signing_keys_bulk.
        """
        raise NotImplementedError()

    @cachedList(
        cached_method_name="_get_bare_e2e_cross_signing_keys",
        list_name="user_ids",
        num_args=1,
    )
    async def _get_bare_e2e_cross_signing_keys_bulk(
        self, user_ids: Iterable[str]
    ) -> Mapping[str, Optional[Mapping[str, JsonMapping]]]:
        """Returns the cross-signing keys for a set of users. The output of this
        function should be passed to _get_e2e_cross_signing_signatures_txn if
        the signatures for the calling user need to be fetched.

        Args:
            user_ids: the users whose keys are being requested

        Returns:
            A mapping from user ID to key type to key data. If a user's cross-signing
            keys were not found, either their user ID will not be in the dict, or
            their user ID will map to None.
        """
        return await self.db_pool.runInteraction(
            "get_bare_e2e_cross_signing_keys_bulk",
            self._get_bare_e2e_cross_signing_keys_bulk_txn,
            user_ids,
        )

    def _get_bare_e2e_cross_signing_keys_bulk_txn(
        self,
        txn: LoggingTransaction,
        user_ids: Iterable[str],
    ) -> Dict[str, Dict[str, JsonDict]]:
        """Returns the cross-signing keys for a set of users. The output of this
        function should be passed to _get_e2e_cross_signing_signatures_txn if
        the signatures for the calling user need to be fetched.

        Args:
            txn: db connection
            user_ids: the users whose keys are being requested

        Returns:
            Mapping from user ID to key type to key data.
            If a user's cross-signing keys were not found, their user ID will not be in
            the dict.
        """
        result: Dict[str, Dict[str, JsonDict]] = {}

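        # Query the users in chunks so the generated IN clause stays at a manageable
        # number of parameters.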
        for user_chunk in batch_iter(user_ids, 100):
            clause, params = make_in_list_sql_clause(
                txn.database_engine, "user_id", user_chunk
            )

            # Fetch the latest key for each type per user.
            if isinstance(self.database_engine, PostgresEngine):
                # The `DISTINCT ON` clause will pick the *first* row it
                # encounters, so ordering by stream ID desc will ensure we get
                # the latest key.
                sql = """
                    SELECT DISTINCT ON (user_id, keytype) user_id, keytype, keydata, stream_id
                    FROM e2e_cross_signing_keys
                    WHERE %(clause)s
                    ORDER BY user_id, keytype, stream_id DESC
                """ % {
                    "clause": clause
                }
            else:
                # SQLite has special handling for bare columns when using
                # MIN/MAX with a `GROUP BY` clause where it picks the value from
                # a row that matches the MIN/MAX.
                sql = """
                    SELECT user_id, keytype, keydata, MAX(stream_id)
                    FROM e2e_cross_signing_keys
                    WHERE %(clause)s
                    GROUP BY user_id, keytype
                """ % {
                    "clause": clause
                }

            txn.execute(sql, params)
            rows = self.db_pool.cursor_to_dict(txn)

            for row in rows:
                user_id = row["user_id"]
                key_type = row["keytype"]
                key = db_to_json(row["keydata"])
                user_keys = result.setdefault(user_id, {})
                user_keys[key_type] = key

        return result

    def _get_e2e_cross_signing_signatures_txn(
        self,
        txn: LoggingTransaction,
        keys: Dict[str, Optional[Dict[str, JsonDict]]],
        from_user_id: str,
    ) -> Dict[str, Optional[Dict[str, JsonDict]]]:
        """Returns the cross-signing signatures made by a user on a set of keys.

        Args:
            txn: db connection
            keys: a map of user ID to key type to key data.
                This dict will be modified to add signatures.
            from_user_id: fetch the signatures made by this user

        Returns:
            Mapping from user ID to key type to key data.
            The return value will be the same as the keys argument, with the
            modifications included.
        """
        # find out what cross-signing keys (a.k.a. devices) we need to get
        # signatures for. This is a map of (user_id, device_id) to key type
        # (device_id is the key's public part).
        devices: Dict[Tuple[str, str], str] = {}

        for user_id, user_keys in keys.items():
            if user_keys is None:
                continue
            for key_type, key in user_keys.items():
                device_id = None
                for k in key["keys"].values():
                    device_id = k
                # `key` ought to be a `CrossSigningKey`, whose .keys property is a
                # dictionary with a single entry:
                #     "algorithm:base64_public_key": "base64_public_key"
                # See https://spec.matrix.org/v1.1/client-server-api/#cross-signing
                assert isinstance(device_id, str)
                devices[(user_id, device_id)] = key_type

        for batch in batch_iter(devices.keys(), size=100):
            sql = """
                SELECT target_user_id, target_device_id, key_id, signature
                FROM e2e_cross_signing_signatures
                WHERE user_id = ?
                    AND (%s)
                """ % (
                " OR ".join(
                    "(target_user_id = ? AND target_device_id = ?)" for _ in batch
                )
            )
            query_params = [from_user_id]
            for item in batch:
                # item is a (user_id, device_id) tuple
                query_params.extend(item)

            txn.execute(sql, query_params)
            rows = self.db_pool.cursor_to_dict(txn)

            # and add the signatures to the appropriate keys
            for row in rows:
                key_id: str = row["key_id"]
                target_user_id: str = row["target_user_id"]
                target_device_id: str = row["target_device_id"]
                key_type = devices[(target_user_id, target_device_id)]
                # We need to copy everything, because the result may have come
                # from the cache. dict.copy only does a shallow copy, so we
                # need to recursively copy the dicts that will be modified.
                user_keys = keys[target_user_id]
                # `user_keys` cannot be `None` because we only fetched signatures for
                # users with keys
                assert user_keys is not None
                user_keys = keys[target_user_id] = user_keys.copy()

                target_user_key = user_keys[key_type] = user_keys[key_type].copy()
                if "signatures" in target_user_key:
                    signatures = target_user_key["signatures"] = target_user_key[
                        "signatures"
                    ].copy()
                    if from_user_id in signatures:
                        user_sigs = signatures[from_user_id] = signatures[from_user_id]
                        user_sigs[key_id] = row["signature"]
                    else:
                        signatures[from_user_id] = {key_id: row["signature"]}
                else:
                    target_user_key["signatures"] = {
                        from_user_id: {key_id: row["signature"]}
                    }

        return keys

    @cancellable
    async def get_e2e_cross_signing_keys_bulk(
        self, user_ids: List[str], from_user_id: Optional[str] = None
    ) -> Mapping[str, Optional[Mapping[str, JsonMapping]]]:
        """Returns the cross-signing keys for a set of users.

        Args:
            user_ids: the users whose keys are being requested
            from_user_id: if specified, signatures made by this user on
                the self-signing keys will be included in the result

        Returns:
            A map of user ID to key type to key data. If a user's cross-signing
            keys were not found, either their user ID will not be in the dict,
            or their user ID will map to None.
        """
        result = await self._get_bare_e2e_cross_signing_keys_bulk(user_ids)

        if from_user_id:
            result = cast(
                Dict[str, Optional[Mapping[str, JsonMapping]]],
                await self.db_pool.runInteraction(
                    "get_e2e_cross_signing_signatures",
                    self._get_e2e_cross_signing_signatures_txn,
                    result,
                    from_user_id,
                ),
            )

        return result

    async def get_all_user_signature_changes_for_remotes(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
        """Get updates for the user signature replication stream.

        Note that the user signature stream represents when a user signs their
        device with their user-signing key, which is not published to other
        users or servers, so no `destination` is needed in the returned
        list. However, this is needed to poke workers.

        Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.

        Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exists
            between the requested tokens due to the limit.

            The token returned can be used in a subsequent call to this
            function to get further updates.

            The updates are a list of 2-tuples of stream ID and the row data
        """
        if last_id == current_id:
            return [], current_id, False

        def _get_all_user_signature_changes_for_remotes_txn(
            txn: LoggingTransaction,
        ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
            sql = """
                SELECT stream_id, from_user_id AS user_id
                FROM user_signature_stream
                WHERE ? < stream_id AND stream_id <= ?
                ORDER BY stream_id ASC
                LIMIT ?
            """
            txn.execute(sql, (last_id, current_id, limit))

            updates = [(row[0], (row[1:])) for row in txn]

            limited = False
            upto_token = current_id
            if len(updates) >= limit:
                upto_token = updates[-1][0]
                limited = True

            return updates, upto_token, limited

        return await self.db_pool.runInteraction(
            "get_all_user_signature_changes_for_remotes",
            _get_all_user_signature_changes_for_remotes_txn,
        )

    @abc.abstractmethod
    def get_device_stream_token(self) -> int:
        """Get the current stream id from the _device_list_id_gen"""
        ...

    async def claim_e2e_one_time_keys(
        self, query_list: Iterable[Tuple[str, str, str, int]]
    ) -> Tuple[
        Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]]
    ]:
        """Take a list of one time keys out of the database.

        Args:
            query_list: An iterable of tuples of (user ID, device ID, algorithm, count).

        Returns:
            A tuple of:
                A map of user ID -> a map device ID -> a map of key ID -> JSON.

                A copy of the input which has not been fulfilled.
        """

        @trace
        def _claim_e2e_one_time_key_simple(
            txn: LoggingTransaction,
            user_id: str,
            device_id: str,
            algorithm: str,
            count: int,
        ) -> List[Tuple[str, str]]:
            """Claim OTK for device for DBs that don't support RETURNING.

            Returns:
                A list of tuples of key name (algorithm + key ID) and key JSON, one
                for each OTK that was claimed.
            """
            sql = """
                SELECT key_id, key_json FROM e2e_one_time_keys_json
                WHERE user_id = ? AND device_id = ? AND algorithm = ?
                LIMIT ?
            """

            txn.execute(sql, (user_id, device_id, algorithm, count))
            otk_rows = list(txn)
            if not otk_rows:
                return []

            self.db_pool.simple_delete_many_txn(
                txn,
                table="e2e_one_time_keys_json",
                column="key_id",
                values=[otk_row[0] for otk_row in otk_rows],
                keyvalues={
                    "user_id": user_id,
                    "device_id": device_id,
                    "algorithm": algorithm,
                },
            )
            self._invalidate_cache_and_stream(
                txn, self.count_e2e_one_time_keys, (user_id, device_id)
            )

            return [
                (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows
            ]

        @trace
        def _claim_e2e_one_time_key_returning(
            txn: LoggingTransaction,
            user_id: str,
            device_id: str,
            algorithm: str,
            count: int,
        ) -> List[Tuple[str, str]]:
            """Claim OTK for device for DBs that support RETURNING.

            Returns:
                A list of tuples of key name (algorithm + key ID) and key JSON, one
                for each OTK that was claimed.
            """
            # We can use RETURNING to do the fetch and DELETE in one step.
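            # The inner SELECT bounds how many keys are deleted, since a bare
            # `DELETE ... LIMIT` is not portable SQL.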
            sql = """
                DELETE FROM e2e_one_time_keys_json
                WHERE user_id = ? AND device_id = ? AND algorithm = ?
                    AND key_id IN (
                        SELECT key_id FROM e2e_one_time_keys_json
                        WHERE user_id = ? AND device_id = ? AND algorithm = ?
                        LIMIT ?
                    )
                RETURNING key_id, key_json
            """

            txn.execute(
                sql,
                (user_id, device_id, algorithm, user_id, device_id, algorithm, count),
            )
            otk_rows = list(txn)
            if not otk_rows:
                return []

            self._invalidate_cache_and_stream(
                txn, self.count_e2e_one_time_keys, (user_id, device_id)
            )

            return [
                (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows
            ]

        results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
        missing: List[Tuple[str, str, str, int]] = []
        for user_id, device_id, algorithm, count in query_list:
            if self.database_engine.supports_returning:
                # If we support RETURNING clause we can use a single query that
                # allows us to use autocommit mode.
                _claim_e2e_one_time_key = _claim_e2e_one_time_key_returning
                db_autocommit = True
            else:
                _claim_e2e_one_time_key = _claim_e2e_one_time_key_simple
                db_autocommit = False

            claim_rows = await self.db_pool.runInteraction(
                "claim_e2e_one_time_keys",
                _claim_e2e_one_time_key,
                user_id,
                device_id,
                algorithm,
                count,
                db_autocommit=db_autocommit,
            )
            if claim_rows:
                device_results = results.setdefault(user_id, {}).setdefault(
                    device_id, {}
                )
                for claim_row in claim_rows:
                    device_results[claim_row[0]] = json_decoder.decode(claim_row[1])

            # Did we get enough OTKs?
            count -= len(claim_rows)
            if count:
                missing.append((user_id, device_id, algorithm, count))

        return results, missing

    async def claim_e2e_fallback_keys(
        self, query_list: Iterable[Tuple[str, str, str, bool]]
    ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
        """Take a list of fallback keys out of the database.

        Args:
            query_list: An iterable of tuples of
                (user ID, device ID, algorithm, whether the key should be marked as used).

        Returns:
            A map of user ID -> a map device ID -> a map of key ID -> JSON.
        """
        results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
        for user_id, device_id, algorithm, mark_as_used in query_list:
            row = await self.db_pool.simple_select_one(
                table="e2e_fallback_keys_json",
                keyvalues={
                    "user_id": user_id,
                    "device_id": device_id,
                    "algorithm": algorithm,
                },
                retcols=("key_id", "key_json", "used"),
                desc="_get_fallback_key",
                allow_none=True,
            )
            if row is None:
                continue

            key_id = row["key_id"]
            key_json = row["key_json"]
            used = row["used"]

            # Mark fallback key as used if not already.
            if not used and mark_as_used:
                await self.db_pool.simple_update_one(
                    table="e2e_fallback_keys_json",
                    keyvalues={
                        "user_id": user_id,
                        "device_id": device_id,
                        "algorithm": algorithm,
                        "key_id": key_id,
                    },
                    updatevalues={"used": True},
                    desc="_get_fallback_key_set_used",
                )
                await self.invalidate_cache_and_stream(
                    "get_e2e_unused_fallback_key_types", (user_id, device_id)
                )

            device_results = results.setdefault(user_id, {}).setdefault(device_id, {})
            device_results[f"{algorithm}:{key_id}"] = json_decoder.decode(key_json)

        return results


class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self._cross_signing_id_gen = StreamIdGenerator(
            db_conn,
            hs.get_replication_notifier(),
            "e2e_cross_signing_keys",
            "stream_id",
        )

    async def set_e2e_device_keys(
        self, user_id: str, device_id: str, time_now: int, device_keys: JsonDict
    ) -> bool:
        """Stores device keys for a device. Returns whether there was a change
        or the keys were already in the database.

        Args:
            user_id: user_id of the user to store keys for
            device_id: device_id of the device to store keys for
            time_now: time at the request to store the keys
            device_keys: the keys to store
        """
        return await self.db_pool.runInteraction(
            "set_e2e_device_keys",
            self._set_e2e_device_keys_txn,
            user_id,
            device_id,
            time_now,
            device_keys,
        )

    def _set_e2e_device_keys_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_id: str,
        time_now: int,
        device_keys: JsonDict,
    ) -> bool:
        """Stores device keys for a device. Returns whether there was a change
        or the keys were already in the database.

        Args:
            user_id: user_id of the user to store keys for
            device_id: device_id of the device to store keys for
            time_now: time at the request to store the keys
            device_keys: the keys to store
        """
        set_tag("user_id", user_id)
        set_tag("device_id", device_id)
        set_tag("time_now", time_now)
        set_tag("device_keys", str(device_keys))

        old_key_json = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="e2e_device_keys_json",
            keyvalues={"user_id": user_id, "device_id": device_id},
            retcol="key_json",
            allow_none=True,
        )

        # In py3 we need old_key_json to match new_key_json type. The DB
        # returns unicode while encode_canonical_json returns bytes.
        new_key_json = encode_canonical_json(device_keys).decode("utf-8")

        if old_key_json == new_key_json:
            log_kv({"Message": "Device key already stored."})
            return False

        self.db_pool.simple_upsert_txn(
            txn,
            table="e2e_device_keys_json",
            keyvalues={"user_id": user_id, "device_id": device_id},
            values={"ts_added_ms": time_now, "key_json": new_key_json},
        )
        log_kv({"message": "Device keys stored."})
        return True

    async def delete_e2e_keys_by_device(self, user_id: str, device_id: str) -> None:
        def delete_e2e_keys_by_device_txn(txn: LoggingTransaction) -> None:
            log_kv(
                {
                    "message": "Deleting keys for device",
                    "device_id": device_id,
                    "user_id": user_id,
                }
            )
            self.db_pool.simple_delete_txn(
                txn,
                table="e2e_device_keys_json",
                keyvalues={"user_id": user_id, "device_id": device_id},
            )
            self.db_pool.simple_delete_txn(
                txn,
                table="e2e_one_time_keys_json",
                keyvalues={"user_id": user_id, "device_id": device_id},
            )
            self._invalidate_cache_and_stream(
                txn, self.count_e2e_one_time_keys, (user_id, device_id)
            )
            self.db_pool.simple_delete_txn(
                txn,
                table="dehydrated_devices",
                keyvalues={"user_id": user_id, "device_id": device_id},
            )
            self.db_pool.simple_delete_txn(
                txn,
                table="e2e_fallback_keys_json",
                keyvalues={"user_id": user_id, "device_id": device_id},
            )
            self._invalidate_cache_and_stream(
                txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id)
            )

        await self.db_pool.runInteraction(
            "delete_e2e_keys_by_device", delete_e2e_keys_by_device_txn
        )

    def _set_e2e_cross_signing_key_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        key_type: str,
        key: JsonDict,
        stream_id: int,
    ) -> None:
        """Set a user's cross-signing key.

        Args:
            txn: db connection
            user_id: the user to set the signing key for
            key_type: the type of key that is being set: either 'master'
                for a master key, 'self_signing' for a self-signing key, or
                'user_signing' for a user-signing key
            key: the key data
            stream_id
        """
        # the 'key' dict will look something like:
        # {
        #     "user_id": "@alice:example.com",
        #     "usage": ["self_signing"],
        #     "keys": {
        #         "ed25519:base64+self+signing+public+key": "base64+self+signing+public+key",
        #     },
        #     "signatures": {
        #         "@alice:example.com": {
        #             "ed25519:base64+master+public+key": "base64+signature"
        #         }
        #     }
        # }
        # The "keys" property must only have one entry, which will be the public
        # key, so we just grab the first value in there
        pubkey = next(iter(key["keys"].values()))

        # The cross-signing keys need to occupy the same namespace as devices,
        # since signatures are identified by device ID. So add an entry to the
        # device table to make sure that we don't have a collision with device
        # IDs.
        # We only need to do this for local users, since remote servers should be
        # responsible for checking this for their own users.
        if self.hs.is_mine_id(user_id):
            self.db_pool.simple_insert_txn(
                txn,
                "devices",
                values={
                    "user_id": user_id,
                    "device_id": pubkey,
                    "display_name": key_type + " signing key",
                    "hidden": True,
                },
            )

        # and finally, store the key itself
        self.db_pool.simple_insert_txn(
            txn,
            "e2e_cross_signing_keys",
            values={
                "user_id": user_id,
                "keytype": key_type,
                "keydata": json_encoder.encode(key),
                "stream_id": stream_id,
            },
        )

        self._invalidate_cache_and_stream(
            txn, self._get_bare_e2e_cross_signing_keys, (user_id,)
        )

    async def set_e2e_cross_signing_key(
        self, user_id: str, key_type: str, key: JsonDict
    ) -> None:
        """Set a user's cross-signing key.

        Args:
            user_id: the user to set the user-signing key for
            key_type: the type of cross-signing key to set
            key: the key data
        """
        async with self._cross_signing_id_gen.get_next() as stream_id:
            return await self.db_pool.runInteraction(
                "add_e2e_cross_signing_key",
                self._set_e2e_cross_signing_key_txn,
                user_id,
                key_type,
                key,
                stream_id,
            )

    async def store_e2e_cross_signing_signatures(
        self, user_id: str, signatures: "Iterable[SignatureListItem]"
    ) -> None:
        """Stores cross-signing signatures.

        Args:
            user_id: the user who made the signatures
            signatures: signatures to add
        """
        await self.db_pool.simple_insert_many(
            "e2e_cross_signing_signatures",
            keys=(
                "user_id",
                "key_id",
                "target_user_id",
                "target_device_id",
                "signature",
            ),
            values=[
                (
                    user_id,
                    item.signing_key_id,
                    item.target_user_id,
                    item.target_device_id,
                    item.signature,
                )
                for item in signatures
            ],
            desc="add_e2e_signing_key",
        )