# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import threading
from contextlib import nullcontext
from functools import wraps
from types import TracebackType
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Dict,
    Iterable,
    Optional,
    Set,
    Type,
    TypeVar,
    Union,
)

from prometheus_client import Metric
from prometheus_client.core import REGISTRY, Counter, Gauge
from typing_extensions import ParamSpec

from twisted.internet import defer

from synapse.logging.context import (
    ContextResourceUsage,
    LoggingContext,
    PreserveLoggingContext,
)
from synapse.logging.opentracing import SynapseTags, start_active_span
from synapse.metrics._types import Collector

if TYPE_CHECKING:
    import resource

    # Old versions don't have `LiteralString`
    from typing_extensions import LiteralString


logger = logging.getLogger(__name__)
_background_process_start_count = Counter(
    "synapse_background_process_start_count",
    "Number of background processes started",
    ["name"],
)

_background_process_in_flight_count = Gauge(
    "synapse_background_process_in_flight_count",
    "Number of background processes in flight",
    labelnames=["name"],
)

# we set registry=None in all of these to stop them getting registered with
# the default registry. Instead we collect them all via the `_Collector` below,
# which ensures that we can update them before they are collected.
#
_background_process_ru_utime = Counter(
    "synapse_background_process_ru_utime_seconds",
    "User CPU time used by background processes, in seconds",
    ["name"],
    registry=None,
)

_background_process_ru_stime = Counter(
    "synapse_background_process_ru_stime_seconds",
    "System CPU time used by background processes, in seconds",
    ["name"],
    registry=None,
)

_background_process_db_txn_count = Counter(
    "synapse_background_process_db_txn_count",
    "Number of database transactions done by background processes",
    ["name"],
    registry=None,
)

_background_process_db_txn_duration = Counter(
    "synapse_background_process_db_txn_duration_seconds",
    (
        "Seconds spent by background processes waiting for database "
        "transactions, excluding scheduling time"
    ),
    ["name"],
    registry=None,
)

_background_process_db_sched_duration = Counter(
    "synapse_background_process_db_sched_duration_seconds",
    "Seconds spent by background processes waiting for database connections",
    ["name"],
    registry=None,
)
# map from description to a counter, so that we can name our logcontexts
# incrementally. (It actually duplicates _background_process_start_count, but
# it's much simpler to do so than to try to combine them.)
_background_process_counts: Dict[str, int] = {}

# Set of all running background processes that became active since the
# last time metrics were scraped (i.e. background processes that performed some
# work since the last scrape.)
#
# We do it like this to handle the case where we have a large number of
# background processes stacking up behind a lock or linearizer, where we then
# only need to iterate over and update metrics for the processes that have
# actually been active and can ignore the idle ones.
_background_processes_active_since_last_scrape: "Set[_BackgroundProcess]" = set()

# A lock that covers the above set and dict
_bg_metrics_lock = threading.Lock()
class _Collector(Collector):
    """A custom metrics collector for the background process metrics.

    Ensures that all of the metrics are up-to-date with any in-flight processes
    before they are returned.
    """

    def collect(self) -> Iterable[Metric]:
        global _background_processes_active_since_last_scrape

        # We swap out the _background_processes set with an empty one so that
        # we can safely iterate over the set without holding the lock.
        with _bg_metrics_lock:
            _background_processes_copy = _background_processes_active_since_last_scrape
            _background_processes_active_since_last_scrape = set()

        for process in _background_processes_copy:
            process.update_metrics()

        # now we need to run collect() over each of the static Counters, and
        # yield each metric they return.
        for m in (
            _background_process_ru_utime,
            _background_process_ru_stime,
            _background_process_db_txn_count,
            _background_process_db_txn_duration,
            _background_process_db_sched_duration,
        ):
            yield from m.collect()


REGISTRY.register(_Collector())
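
# How the pieces above fit together (a sketch, not part of this module's API):
# a Prometheus scrape, e.g. `prometheus_client.generate_latest(REGISTRY)`,
# invokes `_Collector.collect()`, which first flushes the resource usage of
# every recently-active background process into the static counters and only
# then yields their samples. For instance:
#
#     from prometheus_client import generate_latest
#
#     # Each scrape drains _background_processes_active_since_last_scrape.
#     print(generate_latest(REGISTRY).decode("utf-8"))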
class _BackgroundProcess:
    def __init__(self, desc: str, ctx: LoggingContext):
        self.desc = desc
        self._context = ctx
        self._reported_stats: Optional[ContextResourceUsage] = None

    def update_metrics(self) -> None:
        """Updates the metrics with values from this process."""
        new_stats = self._context.get_resource_usage()
        if self._reported_stats is None:
            diff = new_stats
        else:
            diff = new_stats - self._reported_stats
        self._reported_stats = new_stats

        # For unknown reasons, the difference in times can be negative. See comment in
        # synapse.http.request_metrics.RequestMetrics.update_metrics.
        _background_process_ru_utime.labels(self.desc).inc(max(diff.ru_utime, 0))
        _background_process_ru_stime.labels(self.desc).inc(max(diff.ru_stime, 0))
        _background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)
        _background_process_db_txn_duration.labels(self.desc).inc(
            diff.db_txn_duration_sec
        )
        _background_process_db_sched_duration.labels(self.desc).inc(
            diff.db_sched_duration_sec
        )


R = TypeVar("R")
def run_as_background_process(
    desc: "LiteralString",
    func: Callable[..., Awaitable[Optional[R]]],
    *args: Any,
    bg_start_span: bool = True,
    **kwargs: Any,
) -> "defer.Deferred[Optional[R]]":
    """Run the given function in its own logcontext, with resource metrics

    This should be used to wrap processes which are fired off to run in the
    background, instead of being associated with a particular request.

    It returns a Deferred which completes when the function completes, but it doesn't
    follow the synapse logcontext rules, which makes it appropriate for passing to
    clock.looping_call and friends (or for firing-and-forgetting in the middle of a
    normal synapse async function).

    Args:
        desc: a description for this background process type
        func: a function, which may return a Deferred or a coroutine
        bg_start_span: Whether to start an opentracing span. Defaults to True.
            Should only be disabled for processes that will not log to or tag
            a span.
        args: positional args for func
        kwargs: keyword args for func

    Returns:
        Deferred which returns the result of func, or `None` if func raises.
        Note that the returned Deferred does not follow the synapse logcontext
        rules.
    """

    async def run() -> Optional[R]:
        with _bg_metrics_lock:
            count = _background_process_counts.get(desc, 0)
            _background_process_counts[desc] = count + 1

        _background_process_start_count.labels(desc).inc()
        _background_process_in_flight_count.labels(desc).inc()

        with BackgroundProcessLoggingContext(desc, count) as context:
            try:
                if bg_start_span:
                    ctx = start_active_span(
                        f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)}
                    )
                else:
                    ctx = nullcontext()  # type: ignore[assignment]
                with ctx:
                    return await func(*args, **kwargs)
            except Exception:
                logger.exception(
                    "Background process '%s' threw an exception",
                    desc,
                )
                return None
            finally:
                _background_process_in_flight_count.labels(desc).dec()

    with PreserveLoggingContext():
        # Note that we return a Deferred here so that it can be used in a
        # looping_call and other places that expect a Deferred.
        return defer.ensureDeferred(run())
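
# A minimal usage sketch (`_prune_old_entries` is a hypothetical coroutine, not
# part of this module): fire off a background task and ignore the returned
# Deferred, which never raises and doesn't follow the logcontext rules.
#
#     async def _prune_old_entries(batch_size: int) -> None:
#         ...  # do some periodic work
#
#     run_as_background_process(
#         "prune_old_entries", _prune_old_entries, batch_size=100
#     )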
P = ParamSpec("P")


def wrap_as_background_process(
    desc: "LiteralString",
) -> Callable[
    [Callable[P, Awaitable[Optional[R]]]],
    Callable[P, "defer.Deferred[Optional[R]]"],
]:
    """Decorator that wraps an asynchronous function `func`, returning a synchronous
    decorated function. Calling the decorated version runs `func` as a background
    process, forwarding all arguments verbatim.

    That is,

        @wrap_as_background_process("func")
        def func(*args): ...
        func(1, 2, third=3)

    is equivalent to:

        def func(*args): ...
        run_as_background_process("func", func, 1, 2, third=3)

    The former can be convenient if `func` needs to be run as a background process in
    multiple places.
    """

    def wrap_as_background_process_inner(
        func: Callable[P, Awaitable[Optional[R]]]
    ) -> Callable[P, "defer.Deferred[Optional[R]]"]:
        @wraps(func)
        def wrap_as_background_process_inner_2(
            *args: P.args, **kwargs: P.kwargs
        ) -> "defer.Deferred[Optional[R]]":
            # type-ignore: mypy is confusing kwargs with the bg_start_span kwarg.
            # Argument 4 to "run_as_background_process" has incompatible type
            # "**P.kwargs"; expected "bool"
            # See https://github.com/python/mypy/issues/8862
            return run_as_background_process(desc, func, *args, **kwargs)  # type: ignore[arg-type]

        return wrap_as_background_process_inner_2

    return wrap_as_background_process_inner
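
# A usage sketch on a method (`ExampleHandler` is hypothetical): `self` is
# forwarded through *args, so the decorator works on bound methods as well as
# free functions.
#
#     class ExampleHandler:
#         @wrap_as_background_process("rotate_keys")
#         async def rotate_keys(self) -> None:
#             ...  # runs as a background process named "rotate_keys"
#
#     # ExampleHandler().rotate_keys() returns a Deferred immediately.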
class BackgroundProcessLoggingContext(LoggingContext):
    """A logging context that tracks in flight metrics for background
    processes.
    """

    __slots__ = ["_proc"]

    def __init__(self, name: str, instance_id: Optional[Union[int, str]] = None):
        """
        Args:
            name: The name of the background process. Each distinct `name` gets a
                separate prometheus time series.

            instance_id: an identifier to add to `name` to distinguish this instance of
                the named background process in the logs. If this is `None`, one is
                made up based on id(self).
        """
        if instance_id is None:
            instance_id = id(self)
        super().__init__("%s-%s" % (name, instance_id))
        self._proc: Optional[_BackgroundProcess] = _BackgroundProcess(name, self)

    def start(self, rusage: "Optional[resource.struct_rusage]") -> None:
        """Log context has started running (again)."""

        super().start(rusage)

        if self._proc is None:
            logger.error(
                "Background process re-entered without a proc: %s",
                self.name,
                stack_info=True,
            )
            return

        # We've become active again so we make sure we're in the list of active
        # procs. (Note that "start" here means we've become active, as opposed
        # to starting for the first time.)
        with _bg_metrics_lock:
            _background_processes_active_since_last_scrape.add(self._proc)

    def __exit__(
        self,
        type: Optional[Type[BaseException]],
        value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        """Log context has finished."""

        super().__exit__(type, value, traceback)

        if self._proc is None:
            logger.error(
                "Background process exited without a proc: %s",
                self.name,
                stack_info=True,
            )
            return

        # The background process has finished. We explicitly remove and manually
        # update the metrics here so that if nothing is scraping metrics the set
        # doesn't infinitely grow.
        with _bg_metrics_lock:
            _background_processes_active_since_last_scrape.discard(self._proc)

        self._proc.update_metrics()

        # Set proc to None to break the reference cycle.
        self._proc = None
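
# A sketch of how this context is used (run_as_background_process above does
# effectively this; the body shown is illustrative, not part of the module):
#
#     with BackgroundProcessLoggingContext("purge_history", instance_id=3):
#         ...  # CPU and DB usage in here is attributed to the "purge_history"
#              # time series; on exit the metrics are flushed and the proc is
#              # removed from the active set.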