# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Thread-local-alike tracking of log contexts within synapse

This module provides objects and utilities for tracking contexts through
synapse code, so that log lines can include a request identifier, and so that
CPU and database activity can be accounted for against the request that caused
them.

See doc/log_contexts.rst for details on how this works.
"""
import logging
import threading
import typing
import warnings
from types import TracebackType
from typing import (
    TYPE_CHECKING,
    Awaitable,
    Callable,
    Optional,
    Tuple,
    Type,
    TypeVar,
    Union,
    overload,
)

import attr
from typing_extensions import Literal, ParamSpec

from twisted.internet import defer, threads
from twisted.python.threadpool import ThreadPool

if TYPE_CHECKING:
    from synapse.logging.scopecontextmanager import _LogContextScope
    from synapse.types import ISynapseReactor

logger = logging.getLogger(__name__)
try:
    import resource

    # Python doesn't ship with a definition of RUSAGE_THREAD but it's defined
    # to be 1 on linux so we hard code it.
    RUSAGE_THREAD = 1

    # If the system doesn't support RUSAGE_THREAD then this should throw an
    # exception.
    resource.getrusage(RUSAGE_THREAD)

    is_thread_resource_usage_supported = True

    def get_thread_resource_usage() -> "Optional[resource.struct_rusage]":
        return resource.getrusage(RUSAGE_THREAD)

except Exception:
    # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we
    # won't track resource usage.
    is_thread_resource_usage_supported = False

    def get_thread_resource_usage() -> "Optional[resource.struct_rusage]":
        return None


# a hook which can be set during testing to assert that we aren't abusing logcontexts.
def logcontext_error(msg: str) -> None:
    logger.warning(msg)


# get an id for the current thread.
#
# threading.get_ident doesn't actually return an OS-level tid, and annoyingly,
# on Linux it actually returns the same value either side of a fork() call. However
# we only fork in one place, so it's not worth the hoop-jumping to get a real tid.
#
get_thread_id = threading.get_ident
class ContextResourceUsage:
    """Object for tracking the resources used by a log context

    Attributes:
        ru_utime (float): user CPU time (in seconds)
        ru_stime (float): system CPU time (in seconds)
        db_txn_count (int): number of database transactions done
        db_sched_duration_sec (float): amount of time spent waiting for a
            database connection
        db_txn_duration_sec (float): amount of time spent doing database
            transactions (excluding scheduling time)
        evt_db_fetch_count (int): number of events requested from the database
    """

    __slots__ = [
        "ru_stime",
        "ru_utime",
        "db_txn_count",
        "db_txn_duration_sec",
        "db_sched_duration_sec",
        "evt_db_fetch_count",
    ]

    def __init__(self, copy_from: "Optional[ContextResourceUsage]" = None) -> None:
        """Create a new ContextResourceUsage

        Args:
            copy_from: if not None, an object to copy stats from
        """
        if copy_from is None:
            self.reset()
        else:
            # FIXME: mypy can't infer the types set via reset() above, so specify explicitly for now
            self.ru_utime: float = copy_from.ru_utime
            self.ru_stime: float = copy_from.ru_stime
            self.db_txn_count: int = copy_from.db_txn_count
            self.db_txn_duration_sec: float = copy_from.db_txn_duration_sec
            self.db_sched_duration_sec: float = copy_from.db_sched_duration_sec
            self.evt_db_fetch_count: int = copy_from.evt_db_fetch_count

    def copy(self) -> "ContextResourceUsage":
        return ContextResourceUsage(copy_from=self)

    def reset(self) -> None:
        self.ru_stime = 0.0
        self.ru_utime = 0.0
        self.db_txn_count = 0
        self.db_txn_duration_sec = 0.0
        self.db_sched_duration_sec = 0.0
        self.evt_db_fetch_count = 0

    def __repr__(self) -> str:
        return (
            "<ContextResourceUsage ru_stime='%r', ru_utime='%r', "
            "db_txn_count='%r', db_txn_duration_sec='%r', "
            "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>"
        ) % (
            self.ru_stime,
            self.ru_utime,
            self.db_txn_count,
            self.db_txn_duration_sec,
            self.db_sched_duration_sec,
            self.evt_db_fetch_count,
        )

    def __iadd__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
        """Add another ContextResourceUsage's stats to this one's.

        Args:
            other: the other resource usage object
        """
        self.ru_utime += other.ru_utime
        self.ru_stime += other.ru_stime
        self.db_txn_count += other.db_txn_count
        self.db_txn_duration_sec += other.db_txn_duration_sec
        self.db_sched_duration_sec += other.db_sched_duration_sec
        self.evt_db_fetch_count += other.evt_db_fetch_count
        return self

    def __isub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
        self.ru_utime -= other.ru_utime
        self.ru_stime -= other.ru_stime
        self.db_txn_count -= other.db_txn_count
        self.db_txn_duration_sec -= other.db_txn_duration_sec
        self.db_sched_duration_sec -= other.db_sched_duration_sec
        self.evt_db_fetch_count -= other.evt_db_fetch_count
        return self

    def __add__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
        res = ContextResourceUsage(copy_from=self)
        res += other
        return res

    def __sub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
        res = ContextResourceUsage(copy_from=self)
        res -= other
        return res
@attr.s(slots=True, auto_attribs=True)
class ContextRequest:
    """
    A bundle of attributes from the SynapseRequest object.

    This exists to:

    * Avoid a cycle between LoggingContext and SynapseRequest.
    * Be a single variable that can be passed from parent LoggingContexts to
      their children.
    """

    request_id: str
    ip_address: str
    site_tag: str
    requester: Optional[str]
    authenticated_entity: Optional[str]
    method: str
    url: str
    protocol: str
    user_agent: str


LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"]


class _Sentinel:
    """Sentinel to represent the root context"""

    __slots__ = ["previous_context", "finished", "request", "scope", "tag"]

    def __init__(self) -> None:
        # Minimal set for compatibility with LoggingContext
        self.previous_context = None
        self.finished = False
        self.request = None
        self.scope = None
        self.tag = None

    def __str__(self) -> str:
        return "sentinel"

    def start(self, rusage: "Optional[resource.struct_rusage]") -> None:
        pass

    def stop(self, rusage: "Optional[resource.struct_rusage]") -> None:
        pass

    def add_database_transaction(self, duration_sec: float) -> None:
        pass

    def add_database_scheduled(self, sched_sec: float) -> None:
        pass

    def record_event_fetch(self, event_count: int) -> None:
        pass

    def __bool__(self) -> Literal[False]:
        return False


SENTINEL_CONTEXT = _Sentinel()
class LoggingContext:
    """Additional context for log formatting. Contexts are scoped within a
    "with" block.

    If a parent is given when creating a new context, then:
        - logging fields are copied from the parent to the new context on entry
        - when the new context exits, the cpu usage stats are copied from the
          child to the parent

    Args:
        name: Name for the context for logging. If this is omitted, it is
            inherited from the parent context.
        parent_context (LoggingContext|None): The parent of the new context
    """

    __slots__ = [
        "previous_context",
        "name",
        "parent_context",
        "_resource_usage",
        "usage_start",
        "main_thread",
        "finished",
        "request",
        "tag",
        "scope",
    ]

    def __init__(
        self,
        name: Optional[str] = None,
        parent_context: "Optional[LoggingContext]" = None,
        request: Optional[ContextRequest] = None,
    ) -> None:
        self.previous_context = current_context()

        # track the resources used by this context so far
        self._resource_usage = ContextResourceUsage()

        # The thread resource usage when the logcontext became active. None
        # if the context is not currently active.
        self.usage_start: Optional[resource.struct_rusage] = None

        self.main_thread = get_thread_id()
        self.request = None
        self.tag = ""
        self.scope: Optional["_LogContextScope"] = None

        # keep track of whether we have hit the __exit__ block for this context
        # (suggesting that the thing that created the context thinks it should
        # be finished, and that re-activating it would suggest an error).
        self.finished = False

        self.parent_context = parent_context

        if self.parent_context is not None:
            # we track the current request_id
            self.request = self.parent_context.request

            # we also track the current scope:
            self.scope = self.parent_context.scope

        if request is not None:
            # the request param overrides the request from the parent context
            self.request = request

        # if we don't have a `name`, but do have a parent context, use its name.
        if self.parent_context and name is None:
            name = str(self.parent_context)
        if name is None:
            raise ValueError(
                "LoggingContext must be given either a name or a parent context"
            )
        self.name = name

    def __str__(self) -> str:
        return self.name

    @classmethod
    def current_context(cls) -> LoggingContextOrSentinel:
        """Get the current logging context from thread local storage

        This exists for backwards compatibility. ``current_context()`` should be
        called directly.

        Returns:
            The current logging context
        """
        warnings.warn(
            "synapse.logging.context.LoggingContext.current_context() is deprecated "
            "in favor of synapse.logging.context.current_context().",
            DeprecationWarning,
            stacklevel=2,
        )
        return current_context()

    @classmethod
    def set_current_context(
        cls, context: LoggingContextOrSentinel
    ) -> LoggingContextOrSentinel:
        """Set the current logging context in thread local storage

        This exists for backwards compatibility. ``set_current_context()`` should be
        called directly.

        Args:
            context: The context to activate.

        Returns:
            The context that was previously active
        """
        warnings.warn(
            "synapse.logging.context.LoggingContext.set_current_context() is deprecated "
            "in favor of synapse.logging.context.set_current_context().",
            DeprecationWarning,
            stacklevel=2,
        )
        return set_current_context(context)

    def __enter__(self) -> "LoggingContext":
        """Enters this logging context into thread local storage"""
        old_context = set_current_context(self)
        if self.previous_context != old_context:
            logcontext_error(
                "Expected previous context %r, found %r"
                % (
                    self.previous_context,
                    old_context,
                )
            )
        return self

    def __exit__(
        self,
        type: Optional[Type[BaseException]],
        value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        """Restore the logging context in thread local storage to the state it
        was before this context was entered.

        Returns:
            None to avoid suppressing any exceptions that were thrown.
        """
        current = set_current_context(self.previous_context)
        if current is not self:
            if current is SENTINEL_CONTEXT:
                logcontext_error("Expected logging context %s was lost" % (self,))
            else:
                logcontext_error(
                    "Expected logging context %s but found %s" % (self, current)
                )

        # the fact that we are here suggests that the caller thinks that everything
        # is done and dusted for this logcontext, and further activity will not get
        # recorded against the correct metrics.
        self.finished = True

    def start(self, rusage: "Optional[resource.struct_rusage]") -> None:
        """
        Record that this logcontext is currently running.

        This should not be called directly: use set_current_context

        Args:
            rusage: the resources used by the current thread, at the point of
                switching to this logcontext. May be None if this platform doesn't
                support getrusage.
        """

        if get_thread_id() != self.main_thread:
            logcontext_error("Started logcontext %s on different thread" % (self,))
            return

        if self.finished:
            logcontext_error("Re-starting finished log context %s" % (self,))

        # If we haven't already started record the thread resource usage so
        # far
        if self.usage_start:
            logcontext_error("Re-starting already-active log context %s" % (self,))
        else:
            self.usage_start = rusage

    def stop(self, rusage: "Optional[resource.struct_rusage]") -> None:
        """
        Record that this logcontext is no longer running.

        This should not be called directly: use set_current_context

        Args:
            rusage: the resources used by the current thread, at the point of
                switching away from this logcontext. May be None if this platform
                doesn't support getrusage.
        """

        try:
            if get_thread_id() != self.main_thread:
                logcontext_error("Stopped logcontext %s on different thread" % (self,))
                return

            if not rusage:
                return

            # Record the cpu used since we started
            if not self.usage_start:
                logcontext_error(
                    "Called stop on logcontext %s without recording a start rusage"
                    % (self,)
                )
                return

            utime_delta, stime_delta = self._get_cputime(rusage)
            self.add_cputime(utime_delta, stime_delta)
        finally:
            self.usage_start = None

    def get_resource_usage(self) -> ContextResourceUsage:
        """Get resources used by this logcontext so far.

        Returns:
            A *copy* of the object tracking resource usage so far
        """

        # we always return a copy, for consistency
        res = self._resource_usage.copy()

        # If we are on the correct thread and we're currently running then we
        # can include resource usage so far.
        is_main_thread = get_thread_id() == self.main_thread
        if self.usage_start and is_main_thread:
            rusage = get_thread_resource_usage()
            assert rusage is not None
            utime_delta, stime_delta = self._get_cputime(rusage)
            res.ru_utime += utime_delta
            res.ru_stime += stime_delta

        return res

    def _get_cputime(self, current: "resource.struct_rusage") -> Tuple[float, float]:
        """Get the cpu usage time between start() and the given rusage

        Args:
            current: the current resource usage

        Returns: Tuple[float, float]: seconds in user mode, seconds in system mode
        """
        assert self.usage_start is not None

        utime_delta = current.ru_utime - self.usage_start.ru_utime
        stime_delta = current.ru_stime - self.usage_start.ru_stime

        # sanity check
        if utime_delta < 0:
            logger.error(
                "utime went backwards! %f < %f",
                current.ru_utime,
                self.usage_start.ru_utime,
            )
            utime_delta = 0

        if stime_delta < 0:
            logger.error(
                "stime went backwards! %f < %f",
                current.ru_stime,
                self.usage_start.ru_stime,
            )
            stime_delta = 0

        return utime_delta, stime_delta

    def add_cputime(self, utime_delta: float, stime_delta: float) -> None:
        """Update the CPU time usage of this context (and any parents, recursively).

        Args:
            utime_delta: additional user time, in seconds, spent in this context.
            stime_delta: additional system time, in seconds, spent in this context.
        """
        self._resource_usage.ru_utime += utime_delta
        self._resource_usage.ru_stime += stime_delta
        if self.parent_context:
            self.parent_context.add_cputime(utime_delta, stime_delta)

    def add_database_transaction(self, duration_sec: float) -> None:
        """Record the use of a database transaction and the length of time it took.

        Args:
            duration_sec: The number of seconds the database transaction took.
        """
        if duration_sec < 0:
            raise ValueError("DB txn time can only be non-negative")
        self._resource_usage.db_txn_count += 1
        self._resource_usage.db_txn_duration_sec += duration_sec
        if self.parent_context:
            self.parent_context.add_database_transaction(duration_sec)

    def add_database_scheduled(self, sched_sec: float) -> None:
        """Record a use of the database pool

        Args:
            sched_sec: number of seconds it took us to get a connection
        """
        if sched_sec < 0:
            raise ValueError("DB scheduling time can only be non-negative")
        self._resource_usage.db_sched_duration_sec += sched_sec
        if self.parent_context:
            self.parent_context.add_database_scheduled(sched_sec)

    def record_event_fetch(self, event_count: int) -> None:
        """Record a number of events being fetched from the db

        Args:
            event_count: number of events being fetched
        """
        self._resource_usage.evt_db_fetch_count += event_count
        if self.parent_context:
            self.parent_context.record_event_fetch(event_count)
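

# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the normal way a LoggingContext is used. Entering the "with" block makes the
# context current (so log lines and CPU/db accounting are attributed to it);
# exiting restores whatever context was current before. The context name and
# the recorded transaction time below are hypothetical.
def _example_logging_context_usage() -> ContextResourceUsage:
    with LoggingContext(name="POST-1234") as ctx:
        logger.info("this log line is attributed to POST-1234")
        ctx.add_database_transaction(0.002)
        usage = ctx.get_resource_usage()
    return usage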
class LoggingContextFilter(logging.Filter):
    """Logging filter that adds values from the current logging context to each
    record.
    """

    def __init__(self, request: str = ""):
        self._default_request = request

    def filter(self, record: logging.LogRecord) -> Literal[True]:
        """Add each field from the logging contexts to the record.

        Returns:
            True to include the record in the log output.
        """
        context = current_context()
        record.request = self._default_request

        # context should never be None, but if it somehow ends up being, then
        # we end up in a death spiral of infinite loops, so let's check, for
        # robustness' sake.
        if context is not None:
            # Logging is interested in the request ID. Note that for backwards
            # compatibility this is stored as the "request" on the record.
            record.request = str(context)

            # Add some data from the HTTP request.
            request = context.request
            if request is None:
                return True

            record.ip_address = request.ip_address
            record.site_tag = request.site_tag
            record.requester = request.requester
            record.authenticated_entity = request.authenticated_entity
            record.method = request.method
            record.url = request.url
            record.protocol = request.protocol
            record.user_agent = request.user_agent

        return True
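

# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# wiring LoggingContextFilter into the standard library logging machinery so
# that `%(request)s` becomes available to formatters. The handler and format
# string here are hypothetical choices.
def _example_install_logcontext_filter() -> logging.Handler:
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(request)s - %(levelname)s - %(message)s")
    )
    # the filter populates record.request (and, when available, the HTTP
    # request attributes) on every record that passes through this handler.
    handler.addFilter(LoggingContextFilter(request=""))
    return handler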
class PreserveLoggingContext:
    """Context manager which replaces the logging context

    The previous logging context is restored on exit."""

    __slots__ = ["_old_context", "_new_context"]

    def __init__(
        self, new_context: LoggingContextOrSentinel = SENTINEL_CONTEXT
    ) -> None:
        self._new_context = new_context

    def __enter__(self) -> None:
        self._old_context = set_current_context(self._new_context)

    def __exit__(
        self,
        type: Optional[Type[BaseException]],
        value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        context = set_current_context(self._old_context)

        if context != self._new_context:
            if not context:
                logcontext_error(
                    "Expected logging context %s was lost" % (self._new_context,)
                )
            else:
                logcontext_error(
                    "Expected logging context %s but found %s"
                    % (
                        self._new_context,
                        context,
                    )
                )
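

# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# PreserveLoggingContext with no argument swaps in the sentinel context for
# the duration of the block and restores the previous context afterwards,
# which is useful around code that must not run under the caller's context.
# The context name and function name are hypothetical.
def _example_preserve_logging_context() -> None:
    with LoggingContext(name="outer"):
        with PreserveLoggingContext():
            # here the sentinel context is active
            assert current_context() is SENTINEL_CONTEXT
        # the "outer" context is active again at this point
        logger.info("back under the outer context")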
_thread_local = threading.local()
_thread_local.current_context = SENTINEL_CONTEXT


def current_context() -> LoggingContextOrSentinel:
    """Get the current logging context from thread local storage"""
    return getattr(_thread_local, "current_context", SENTINEL_CONTEXT)


def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
    """Set the current logging context in thread local storage

    Args:
        context: The context to activate.

    Returns:
        The context that was previously active
    """
    # everything blows up if we allow current_context to be set to None, so sanity-check
    # that now.
    if context is None:
        raise TypeError("'context' argument may not be None")

    current = current_context()

    if current is not context:
        rusage = get_thread_resource_usage()
        current.stop(rusage)
        _thread_local.current_context = context
        context.start(rusage)

    return current
def nested_logging_context(suffix: str) -> LoggingContext:
    """Creates a new logging context as a child of another.

    The nested logging context will have a 'name' made up of the parent context's
    name, plus the given suffix.

    CPU/db usage stats will be added to the parent context's on exit.

    Normal usage looks like:

        with nested_logging_context(suffix):
            # ... do stuff

    Args:
        suffix: suffix to add to the parent context's 'name'.

    Returns:
        A new logging context.
    """
    curr_context = current_context()
    if not curr_context:
        logger.warning(
            "Starting nested logging context from sentinel context: metrics will be lost"
        )
        parent_context = None
    else:
        assert isinstance(curr_context, LoggingContext)
        parent_context = curr_context
    prefix = str(curr_context)
    return LoggingContext(
        prefix + "-" + suffix,
        parent_context=parent_context,
    )
P = ParamSpec("P")
R = TypeVar("R")


async def _unwrap_awaitable(awaitable: Awaitable[R]) -> R:
    """Unwraps an arbitrary awaitable by awaiting it."""
    return await awaitable


@overload
def preserve_fn(  # type: ignore[misc]
    f: Callable[P, Awaitable[R]],
) -> Callable[P, "defer.Deferred[R]"]:
    # The `type: ignore[misc]` above suppresses
    # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
    ...


@overload
def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]:
    ...


def preserve_fn(
    f: Union[
        Callable[P, R],
        Callable[P, Awaitable[R]],
    ]
) -> Callable[P, "defer.Deferred[R]"]:
    """Function decorator which wraps the function with run_in_background"""

    def g(*args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[R]":
        return run_in_background(f, *args, **kwargs)

    return g
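

# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# preserve_fn is simply the decorator form of run_in_background. Both names
# below are hypothetical.
async def _example_background_task(message: str) -> str:
    logger.info("running in the background: %s", message)
    return message


def _example_preserve_fn() -> "defer.Deferred[str]":
    # the wrapped callable starts the coroutine immediately and returns a
    # Deferred, while leaving the caller's logcontext intact.
    fire_and_forget = preserve_fn(_example_background_task)
    return fire_and_forget("ping")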
@overload
def run_in_background(  # type: ignore[misc]
    f: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[R]":
    # The `type: ignore[misc]` above suppresses
    # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
    ...


@overload
def run_in_background(
    f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[R]":
    ...


def run_in_background(  # type: ignore[misc]
    # The `type: ignore[misc]` above suppresses
    # "Overloaded function implementation does not accept all possible arguments of signature 1"
    # "Overloaded function implementation does not accept all possible arguments of signature 2"
    # which seems like a bug in mypy.
    f: Union[
        Callable[P, R],
        Callable[P, Awaitable[R]],
    ],
    *args: P.args,
    **kwargs: P.kwargs,
) -> "defer.Deferred[R]":
    """Calls a function, ensuring that the current context is restored after
    return from the function, and that the sentinel context is set once the
    deferred returned by the function completes.

    Useful for wrapping functions that return a deferred or coroutine, which you don't
    yield or await on (for instance because you want to pass it to
    deferred.gatherResults()).

    If f returns a Coroutine object, it will be wrapped into a Deferred (which will have
    the side effect of executing the coroutine).

    Note that if you completely discard the result, you should make sure that
    `f` doesn't raise any deferred exceptions, otherwise a scary-looking
    CRITICAL error about an unhandled error will be logged without much
    indication about where it came from.
    """
    current = current_context()
    try:
        res = f(*args, **kwargs)
    except Exception:
        # the assumption here is that the caller doesn't want to be disturbed
        # by synchronous exceptions, so let's turn them into Failures.
        return defer.fail()

    # `res` may be a coroutine, `Deferred`, some other kind of awaitable, or a plain
    # value. Convert it to a `Deferred`.
    d: "defer.Deferred[R]"
    if isinstance(res, typing.Coroutine):
        # Wrap the coroutine in a `Deferred`.
        d = defer.ensureDeferred(res)
    elif isinstance(res, defer.Deferred):
        d = res
    elif isinstance(res, Awaitable):
        # `res` is probably some kind of completed awaitable, such as a `DoneAwaitable`
        # or `Future` from `make_awaitable`.
        d = defer.ensureDeferred(_unwrap_awaitable(res))
    else:
        # `res` is a plain value. Wrap it in a `Deferred`.
        d = defer.succeed(res)

    if d.called and not d.paused:
        # The function should have maintained the logcontext, so we can
        # optimise out the messing about
        return d

    # The function may have reset the context before returning, so
    # we need to restore it now.
    ctx = set_current_context(current)

    # The original context will be restored when the deferred
    # completes, but there is nothing waiting for it, so it will
    # get leaked into the reactor or some other function which
    # wasn't expecting it. We therefore need to reset the context
    # here.
    #
    # (If this feels asymmetric, consider it this way: we are
    # effectively forking a new thread of execution. We are
    # probably currently within a ``with LoggingContext()`` block,
    # which is supposed to have a single entry and exit point. But
    # by spawning off another deferred, we are effectively
    # adding a new exit point.)
    d.addBoth(_set_context_cb, ctx)
    return d
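

# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# firing off work without awaiting it. run_in_background starts the coroutine
# under the caller's logcontext and hands back a Deferred; the caller's
# context is restored before control returns. All names here are hypothetical.
def _example_run_in_background() -> "defer.Deferred[None]":
    async def _flush_cache() -> None:
        logger.info("flushing (attributed to the calling logcontext)")

    with LoggingContext(name="cache-flush"):
        d = run_in_background(_flush_cache)
        # we deliberately do not await `d` here; if we wanted the result we
        # would `await make_deferred_yieldable(d)` instead.
    return d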
T = TypeVar("T")


def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
    """Given a deferred, make it follow the Synapse logcontext rules:

    If the deferred has completed, essentially does nothing (just returns another
    completed deferred with the result/failure).

    If the deferred has not yet completed, resets the logcontext before
    returning a deferred. Then, when the deferred completes, restores the
    current logcontext before running callbacks/errbacks.

    (This is more-or-less the opposite operation to run_in_background.)
    """
    if deferred.called and not deferred.paused:
        # it looks like this deferred is ready to run any callbacks we give it
        # immediately. We may as well optimise out the logcontext faffery.
        return deferred

    # ok, we can't be sure that a yield won't block, so let's reset the
    # logcontext, and add a callback to the deferred to restore it.
    prev_context = set_current_context(SENTINEL_CONTEXT)
    deferred.addBoth(_set_context_cb, prev_context)
    return deferred
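

# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# awaiting a "context-naive" Deferred from inside a logcontext. The deferred
# argument is hypothetical; in real code it would typically come from a
# Twisted API that knows nothing about logcontexts.
async def _example_await_external_deferred(external_d: "defer.Deferred[str]") -> str:
    # Without make_deferred_yieldable we would carry our logcontext into the
    # reactor while waiting; with it, the sentinel is set while we block and
    # our context is restored when the result arrives.
    return await make_deferred_yieldable(external_d)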
ResultT = TypeVar("ResultT")


def _set_context_cb(result: ResultT, context: LoggingContextOrSentinel) -> ResultT:
    """A callback function which just sets the logging context"""
    set_current_context(context)
    return result


def defer_to_thread(
    reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
) -> "defer.Deferred[R]":
    """
    Calls the function `f` using a thread from the reactor's default threadpool and
    returns the result as a Deferred.

    Creates a new logcontext for `f`, which is created as a child of the current
    logcontext (so its CPU usage metrics will get attributed to the current
    logcontext). `f` should preserve the logcontext it is given.

    The result deferred follows the Synapse logcontext rules: you should `yield`
    on it.

    Args:
        reactor: The reactor in whose main thread the Deferred will be invoked,
            and whose threadpool we should use for the function.

            Normally this will be hs.get_reactor().

        f: The function to call.

        args: positional arguments to pass to f.

        kwargs: keyword arguments to pass to f.

    Returns:
        A Deferred which fires a callback with the result of `f`, or an
            errback if `f` throws an exception.
    """
    return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
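

# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# pushing blocking work onto the reactor's default threadpool. The reactor
# argument and the blocking file read are hypothetical; in Synapse the reactor
# would normally come from hs.get_reactor().
async def _example_defer_to_thread(reactor: "ISynapseReactor", path: str) -> bytes:
    def _read_file_blocking() -> bytes:
        with open(path, "rb") as f:
            return f.read()

    # defer_to_thread runs `_read_file_blocking` on the threadpool in a child
    # logcontext; the returned Deferred already follows the logcontext rules,
    # so we can await it directly.
    return await defer_to_thread(reactor, _read_file_blocking)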
def defer_to_threadpool(
    reactor: "ISynapseReactor",
    threadpool: ThreadPool,
    f: Callable[P, R],
    *args: P.args,
    **kwargs: P.kwargs,
) -> "defer.Deferred[R]":
    """
    A wrapper for twisted.internet.threads.deferToThreadpool, which handles
    logcontexts correctly.

    Calls the function `f` using a thread from the given threadpool and returns
    the result as a Deferred.

    Creates a new logcontext for `f`, which is created as a child of the current
    logcontext (so its CPU usage metrics will get attributed to the current
    logcontext). `f` should preserve the logcontext it is given.

    The result deferred follows the Synapse logcontext rules: you should `yield`
    on it.

    Args:
        reactor: The reactor in whose main thread the Deferred will be invoked.
            Normally this will be hs.get_reactor().

        threadpool: The threadpool to use for running `f`. Normally this will be
            hs.get_reactor().getThreadPool().

        f: The function to call.

        args: positional arguments to pass to f.

        kwargs: keyword arguments to pass to f.

    Returns:
        A Deferred which fires a callback with the result of `f`, or an
            errback if `f` throws an exception.
    """
    curr_context = current_context()
    if not curr_context:
        logger.warning(
            "Calling defer_to_threadpool from sentinel context: metrics will be lost"
        )
        parent_context = None
    else:
        assert isinstance(curr_context, LoggingContext)
        parent_context = curr_context

    def g() -> R:
        with LoggingContext(str(curr_context), parent_context=parent_context):
            return f(*args, **kwargs)

    return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))