You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

478 lines
15 KiB

  1. # Copyright 2015, 2016 OpenMarket Ltd
  2. # Copyright 2022 The Matrix.org Foundation C.I.C.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. import itertools
  16. import logging
  17. import os
  18. import platform
  19. import threading
  20. from typing import (
  21. Callable,
  22. Dict,
  23. Generic,
  24. Iterable,
  25. Mapping,
  26. Optional,
  27. Sequence,
  28. Set,
  29. Tuple,
  30. Type,
  31. TypeVar,
  32. Union,
  33. cast,
  34. )
  35. import attr
  36. from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric
  37. from prometheus_client.core import (
  38. REGISTRY,
  39. GaugeHistogramMetricFamily,
  40. GaugeMetricFamily,
  41. )
  42. from twisted.python.threadpool import ThreadPool
  43. # This module is imported for its side effects; flake8 needn't warn that it's unused.
  44. import synapse.metrics._reactor_metrics # noqa: F401
  45. from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
  46. from synapse.metrics._twisted_exposition import MetricsResource, generate_latest
  47. from synapse.metrics._types import Collector
  48. from synapse.util import SYNAPSE_VERSION
logger = logging.getLogger(__name__)

# URL path under which the metrics HTTP resource is mounted.
METRICS_PREFIX = "/_synapse/metrics"

# All gauges registered via this module, keyed by metric name. Used so that
# creating a gauge with an already-used name replaces the old registration
# instead of raising a duplicate-collector error.
all_gauges: Dict[str, Collector] = {}

# True on Linux-like systems where per-process CPU stats can be read from procfs.
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
  53. class _RegistryProxy:
  54. @staticmethod
  55. def collect() -> Iterable[Metric]:
  56. for metric in REGISTRY.collect():
  57. if not metric.name.startswith("__"):
  58. yield metric
  59. # A little bit nasty, but collect() above is static so a Protocol doesn't work.
  60. # _RegistryProxy matches the signature of a CollectorRegistry instance enough
  61. # for it to be usable in the contexts in which we use it.
  62. # TODO Do something nicer about this.
  63. RegistryProxy = cast(CollectorRegistry, _RegistryProxy)
  64. @attr.s(slots=True, hash=True, auto_attribs=True)
  65. class LaterGauge(Collector):
  66. name: str
  67. desc: str
  68. labels: Optional[Sequence[str]] = attr.ib(hash=False)
  69. # callback: should either return a value (if there are no labels for this metric),
  70. # or dict mapping from a label tuple to a value
  71. caller: Callable[
  72. [], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
  73. ]
  74. def collect(self) -> Iterable[Metric]:
  75. g = GaugeMetricFamily(self.name, self.desc, labels=self.labels)
  76. try:
  77. calls = self.caller()
  78. except Exception:
  79. logger.exception("Exception running callback for LaterGauge(%s)", self.name)
  80. yield g
  81. return
  82. if isinstance(calls, (int, float)):
  83. g.add_metric([], calls)
  84. else:
  85. for k, v in calls.items():
  86. g.add_metric(k, v)
  87. yield g
  88. def __attrs_post_init__(self) -> None:
  89. self._register()
  90. def _register(self) -> None:
  91. if self.name in all_gauges.keys():
  92. logger.warning("%s already registered, reregistering" % (self.name,))
  93. REGISTRY.unregister(all_gauges.pop(self.name))
  94. REGISTRY.register(self)
  95. all_gauges[self.name] = self
# Type of the per-key metrics object that InFlightGauge passes to its
# registered callbacks (an attrs class built dynamically from `sub_metrics`).
# `MetricsEntry` only makes sense when it is a `Protocol`,
# but `Protocol` can't be used as a `TypeVar` bound.
MetricsEntry = TypeVar("MetricsEntry")
  99. class InFlightGauge(Generic[MetricsEntry], Collector):
  100. """Tracks number of things (e.g. requests, Measure blocks, etc) in flight
  101. at any given time.
  102. Each InFlightGauge will create a metric called `<name>_total` that counts
  103. the number of in flight blocks, as well as a metrics for each item in the
  104. given `sub_metrics` as `<name>_<sub_metric>` which will get updated by the
  105. callbacks.
  106. Args:
  107. name
  108. desc
  109. labels
  110. sub_metrics: A list of sub metrics that the callbacks will update.
  111. """
  112. def __init__(
  113. self,
  114. name: str,
  115. desc: str,
  116. labels: Sequence[str],
  117. sub_metrics: Sequence[str],
  118. ):
  119. self.name = name
  120. self.desc = desc
  121. self.labels = labels
  122. self.sub_metrics = sub_metrics
  123. # Create a class which have the sub_metrics values as attributes, which
  124. # default to 0 on initialization. Used to pass to registered callbacks.
  125. self._metrics_class: Type[MetricsEntry] = attr.make_class(
  126. "_MetricsEntry",
  127. attrs={x: attr.ib(default=0) for x in sub_metrics},
  128. slots=True,
  129. )
  130. # Counts number of in flight blocks for a given set of label values
  131. self._registrations: Dict[
  132. Tuple[str, ...], Set[Callable[[MetricsEntry], None]]
  133. ] = {}
  134. # Protects access to _registrations
  135. self._lock = threading.Lock()
  136. self._register_with_collector()
  137. def register(
  138. self,
  139. key: Tuple[str, ...],
  140. callback: Callable[[MetricsEntry], None],
  141. ) -> None:
  142. """Registers that we've entered a new block with labels `key`.
  143. `callback` gets called each time the metrics are collected. The same
  144. value must also be given to `unregister`.
  145. `callback` gets called with an object that has an attribute per
  146. sub_metric, which should be updated with the necessary values. Note that
  147. the metrics object is shared between all callbacks registered with the
  148. same key.
  149. Note that `callback` may be called on a separate thread.
  150. """
  151. with self._lock:
  152. self._registrations.setdefault(key, set()).add(callback)
  153. def unregister(
  154. self,
  155. key: Tuple[str, ...],
  156. callback: Callable[[MetricsEntry], None],
  157. ) -> None:
  158. """Registers that we've exited a block with labels `key`."""
  159. with self._lock:
  160. self._registrations.setdefault(key, set()).discard(callback)
  161. def collect(self) -> Iterable[Metric]:
  162. """Called by prometheus client when it reads metrics.
  163. Note: may be called by a separate thread.
  164. """
  165. in_flight = GaugeMetricFamily(
  166. self.name + "_total", self.desc, labels=self.labels
  167. )
  168. metrics_by_key = {}
  169. # We copy so that we don't mutate the list while iterating
  170. with self._lock:
  171. keys = list(self._registrations)
  172. for key in keys:
  173. with self._lock:
  174. callbacks = set(self._registrations[key])
  175. in_flight.add_metric(key, len(callbacks))
  176. metrics = self._metrics_class()
  177. metrics_by_key[key] = metrics
  178. for callback in callbacks:
  179. callback(metrics)
  180. yield in_flight
  181. for name in self.sub_metrics:
  182. gauge = GaugeMetricFamily(
  183. "_".join([self.name, name]), "", labels=self.labels
  184. )
  185. for key, metrics in metrics_by_key.items():
  186. gauge.add_metric(key, getattr(metrics, name))
  187. yield gauge
  188. def _register_with_collector(self) -> None:
  189. if self.name in all_gauges.keys():
  190. logger.warning("%s already registered, reregistering" % (self.name,))
  191. REGISTRY.unregister(all_gauges.pop(self.name))
  192. REGISTRY.register(self)
  193. all_gauges[self.name] = self
  194. class GaugeBucketCollector(Collector):
  195. """Like a Histogram, but the buckets are Gauges which are updated atomically.
  196. The data is updated by calling `update_data` with an iterable of measurements.
  197. We assume that the data is updated less frequently than it is reported to
  198. Prometheus, and optimise for that case.
  199. """
  200. __slots__ = (
  201. "_name",
  202. "_documentation",
  203. "_bucket_bounds",
  204. "_metric",
  205. )
  206. def __init__(
  207. self,
  208. name: str,
  209. documentation: str,
  210. buckets: Iterable[float],
  211. registry: CollectorRegistry = REGISTRY,
  212. ):
  213. """
  214. Args:
  215. name: base name of metric to be exported to Prometheus. (a _bucket suffix
  216. will be added.)
  217. documentation: help text for the metric
  218. buckets: The top bounds of the buckets to report
  219. registry: metric registry to register with
  220. """
  221. self._name = name
  222. self._documentation = documentation
  223. # the tops of the buckets
  224. self._bucket_bounds = [float(b) for b in buckets]
  225. if self._bucket_bounds != sorted(self._bucket_bounds):
  226. raise ValueError("Buckets not in sorted order")
  227. if self._bucket_bounds[-1] != float("inf"):
  228. self._bucket_bounds.append(float("inf"))
  229. # We initially set this to None. We won't report metrics until
  230. # this has been initialised after a successful data update
  231. self._metric: Optional[GaugeHistogramMetricFamily] = None
  232. registry.register(self)
  233. def collect(self) -> Iterable[Metric]:
  234. # Don't report metrics unless we've already collected some data
  235. if self._metric is not None:
  236. yield self._metric
  237. def update_data(self, values: Iterable[float]) -> None:
  238. """Update the data to be reported by the metric
  239. The existing data is cleared, and each measurement in the input is assigned
  240. to the relevant bucket.
  241. """
  242. self._metric = self._values_to_metric(values)
  243. def _values_to_metric(self, values: Iterable[float]) -> GaugeHistogramMetricFamily:
  244. total = 0.0
  245. bucket_values = [0 for _ in self._bucket_bounds]
  246. for v in values:
  247. # assign each value to a bucket
  248. for i, bound in enumerate(self._bucket_bounds):
  249. if v <= bound:
  250. bucket_values[i] += 1
  251. break
  252. # ... and increment the sum
  253. total += v
  254. # now, aggregate the bucket values so that they count the number of entries in
  255. # that bucket or below.
  256. accumulated_values = itertools.accumulate(bucket_values)
  257. return GaugeHistogramMetricFamily(
  258. self._name,
  259. self._documentation,
  260. buckets=list(
  261. zip((str(b) for b in self._bucket_bounds), accumulated_values)
  262. ),
  263. gsum_value=total,
  264. )
  265. #
  266. # Detailed CPU metrics
  267. #
  268. class CPUMetrics(Collector):
  269. def __init__(self) -> None:
  270. ticks_per_sec = 100
  271. try:
  272. # Try and get the system config
  273. ticks_per_sec = os.sysconf("SC_CLK_TCK")
  274. except (ValueError, TypeError, AttributeError):
  275. pass
  276. self.ticks_per_sec = ticks_per_sec
  277. def collect(self) -> Iterable[Metric]:
  278. if not HAVE_PROC_SELF_STAT:
  279. return
  280. with open("/proc/self/stat") as s:
  281. line = s.read()
  282. raw_stats = line.split(") ", 1)[1].split(" ")
  283. user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
  284. user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
  285. yield user
  286. sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
  287. sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
  288. yield sys
REGISTRY.register(CPUMetrics())

#
# Federation Metrics
#

sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "")

events_processed_counter = Counter("synapse_federation_client_events_processed", "")

event_processing_loop_counter = Counter(
    "synapse_event_processing_loop_count", "Event processing loop iterations", ["name"]
)

event_processing_loop_room_count = Counter(
    "synapse_event_processing_loop_room_count",
    "Rooms seen per event processing loop iteration",
    ["name"],
)

# Used to track where various components have processed in the event stream,
# e.g. federation sending, appservice sending, etc.
event_processing_positions = Gauge("synapse_event_processing_positions", "", ["name"])

# Used to track the current max events stream position
event_persisted_position = Gauge("synapse_event_persisted_position", "")

# Used to track the received_ts of the last event processed by various
# components
event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"])

# Used to track the lag processing events. This is the time difference
# between the last processed event's received_ts and the time it was
# finished being processed.
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])

event_processing_lag_by_event = Histogram(
    "synapse_event_processing_lag_by_event",
    "Time between an event being persisted and it being queued up to be sent to the relevant remote servers",
    ["name"],
)

# Build info of the running server. Exposed as a constant-1 gauge whose labels
# carry the python/synapse/OS version strings.
build_info = Gauge(
    "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]
)
build_info.labels(
    " ".join([platform.python_implementation(), platform.python_version()]),
    SYNAPSE_VERSION,
    " ".join([platform.system(), platform.release()]),
).set(1)

# 3PID send info
threepid_send_requests = Histogram(
    "synapse_threepid_send_requests_with_tries",
    documentation="Number of requests for a 3pid token by try count. Note if"
    " there is a request with try count of 4, then there would have been one"
    " each for 1, 2 and 3",
    buckets=(1, 2, 3, 4, 5, 10),
    labelnames=("type", "reason"),
)

# Per-threadpool gauges, labelled by threadpool name; values are wired up by
# `register_threadpool`.
threadpool_total_threads = Gauge(
    "synapse_threadpool_total_threads",
    "Total number of threads currently in the threadpool",
    ["name"],
)

threadpool_total_working_threads = Gauge(
    "synapse_threadpool_working_threads",
    "Number of threads currently working in the threadpool",
    ["name"],
)

threadpool_total_min_threads = Gauge(
    "synapse_threadpool_min_threads",
    "Minimum number of threads configured in the threadpool",
    ["name"],
)

threadpool_total_max_threads = Gauge(
    "synapse_threadpool_max_threads",
    "Maximum number of threads configured in the threadpool",
    ["name"],
)
  358. def register_threadpool(name: str, threadpool: ThreadPool) -> None:
  359. """Add metrics for the threadpool."""
  360. threadpool_total_min_threads.labels(name).set(threadpool.min)
  361. threadpool_total_max_threads.labels(name).set(threadpool.max)
  362. threadpool_total_threads.labels(name).set_function(lambda: len(threadpool.threads))
  363. threadpool_total_working_threads.labels(name).set_function(
  364. lambda: len(threadpool.working)
  365. )
# The public API of this module: names re-exported from the private
# `synapse.metrics._*` submodules plus the collectors defined above.
__all__ = [
    "Collector",
    "MetricsResource",
    "generate_latest",
    "LaterGauge",
    "InFlightGauge",
    "GaugeBucketCollector",
    "MIN_TIME_BETWEEN_GCS",
    "install_gc_manager",
]