Nevar pievienot vairāk kā 25 tēmas. Tēmai ir jāsākas ar burtu vai ciparu, tā var saturēt domu zīmes ('-') un var būt līdz 35 simboliem gara.
 
 
 
 
 
 

480 rindas
15 KiB

  1. # Copyright 2015, 2016 OpenMarket Ltd
  2. # Copyright 2022 The Matrix.org Foundation C.I.C.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. import itertools
  16. import logging
  17. import os
  18. import platform
  19. import threading
  20. from typing import (
  21. Callable,
  22. Dict,
  23. Generic,
  24. Iterable,
  25. Mapping,
  26. Optional,
  27. Set,
  28. Tuple,
  29. Type,
  30. TypeVar,
  31. Union,
  32. cast,
  33. )
  34. import attr
  35. from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric
  36. from prometheus_client.core import (
  37. REGISTRY,
  38. GaugeHistogramMetricFamily,
  39. GaugeMetricFamily,
  40. )
  41. from twisted.python.threadpool import ThreadPool
  42. # This module is imported for its side effects; flake8 needn't warn that it's unused.
  43. import synapse.metrics._reactor_metrics # noqa: F401
  44. from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
  45. from synapse.metrics._twisted_exposition import MetricsResource, generate_latest
  46. from synapse.metrics._types import Collector
  47. from synapse.types import StrSequence
  48. from synapse.util import SYNAPSE_VERSION
logger = logging.getLogger(__name__)

# URL path under which the metrics resource is expected to be mounted.
METRICS_PREFIX = "/_synapse/metrics"

# Registry of every custom gauge created in this module, keyed by metric name.
# Used by LaterGauge/InFlightGauge to detect and replace duplicate registrations.
all_gauges: Dict[str, Collector] = {}

# True when per-process stats are available via procfs (i.e. on Linux);
# gates the detailed CPU metrics collector below.
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
  53. class _RegistryProxy:
  54. @staticmethod
  55. def collect() -> Iterable[Metric]:
  56. for metric in REGISTRY.collect():
  57. if not metric.name.startswith("__"):
  58. yield metric
# A little bit nasty, but collect() above is static so a Protocol doesn't work.
# _RegistryProxy matches the signature of a CollectorRegistry instance enough
# for it to be usable in the contexts in which we use it.
# TODO Do something nicer about this.
# (The cast is purely for the type checker; at runtime this is still the
# _RegistryProxy class object.)
RegistryProxy = cast(CollectorRegistry, _RegistryProxy)
  64. @attr.s(slots=True, hash=True, auto_attribs=True)
  65. class LaterGauge(Collector):
  66. """A Gauge which periodically calls a user-provided callback to produce metrics."""
  67. name: str
  68. desc: str
  69. labels: Optional[StrSequence] = attr.ib(hash=False)
  70. # callback: should either return a value (if there are no labels for this metric),
  71. # or dict mapping from a label tuple to a value
  72. caller: Callable[
  73. [], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
  74. ]
  75. def collect(self) -> Iterable[Metric]:
  76. g = GaugeMetricFamily(self.name, self.desc, labels=self.labels)
  77. try:
  78. calls = self.caller()
  79. except Exception:
  80. logger.exception("Exception running callback for LaterGauge(%s)", self.name)
  81. yield g
  82. return
  83. if isinstance(calls, (int, float)):
  84. g.add_metric([], calls)
  85. else:
  86. for k, v in calls.items():
  87. g.add_metric(k, v)
  88. yield g
  89. def __attrs_post_init__(self) -> None:
  90. self._register()
  91. def _register(self) -> None:
  92. if self.name in all_gauges.keys():
  93. logger.warning("%s already registered, reregistering" % (self.name,))
  94. REGISTRY.unregister(all_gauges.pop(self.name))
  95. REGISTRY.register(self)
  96. all_gauges[self.name] = self
# `MetricsEntry` only makes sense when it is a `Protocol`,
# but `Protocol` can't be used as a `TypeVar` bound.
# (It stands for the attrs class dynamically built by InFlightGauge, with one
# numeric attribute per sub-metric.)
MetricsEntry = TypeVar("MetricsEntry")
  100. class InFlightGauge(Generic[MetricsEntry], Collector):
  101. """Tracks number of things (e.g. requests, Measure blocks, etc) in flight
  102. at any given time.
  103. Each InFlightGauge will create a metric called `<name>_total` that counts
  104. the number of in flight blocks, as well as a metrics for each item in the
  105. given `sub_metrics` as `<name>_<sub_metric>` which will get updated by the
  106. callbacks.
  107. Args:
  108. name
  109. desc
  110. labels
  111. sub_metrics: A list of sub metrics that the callbacks will update.
  112. """
  113. def __init__(
  114. self,
  115. name: str,
  116. desc: str,
  117. labels: StrSequence,
  118. sub_metrics: StrSequence,
  119. ):
  120. self.name = name
  121. self.desc = desc
  122. self.labels = labels
  123. self.sub_metrics = sub_metrics
  124. # Create a class which have the sub_metrics values as attributes, which
  125. # default to 0 on initialization. Used to pass to registered callbacks.
  126. self._metrics_class: Type[MetricsEntry] = attr.make_class(
  127. "_MetricsEntry",
  128. attrs={x: attr.ib(default=0) for x in sub_metrics},
  129. slots=True,
  130. )
  131. # Counts number of in flight blocks for a given set of label values
  132. self._registrations: Dict[
  133. Tuple[str, ...], Set[Callable[[MetricsEntry], None]]
  134. ] = {}
  135. # Protects access to _registrations
  136. self._lock = threading.Lock()
  137. self._register_with_collector()
  138. def register(
  139. self,
  140. key: Tuple[str, ...],
  141. callback: Callable[[MetricsEntry], None],
  142. ) -> None:
  143. """Registers that we've entered a new block with labels `key`.
  144. `callback` gets called each time the metrics are collected. The same
  145. value must also be given to `unregister`.
  146. `callback` gets called with an object that has an attribute per
  147. sub_metric, which should be updated with the necessary values. Note that
  148. the metrics object is shared between all callbacks registered with the
  149. same key.
  150. Note that `callback` may be called on a separate thread.
  151. """
  152. with self._lock:
  153. self._registrations.setdefault(key, set()).add(callback)
  154. def unregister(
  155. self,
  156. key: Tuple[str, ...],
  157. callback: Callable[[MetricsEntry], None],
  158. ) -> None:
  159. """Registers that we've exited a block with labels `key`."""
  160. with self._lock:
  161. self._registrations.setdefault(key, set()).discard(callback)
  162. def collect(self) -> Iterable[Metric]:
  163. """Called by prometheus client when it reads metrics.
  164. Note: may be called by a separate thread.
  165. """
  166. in_flight = GaugeMetricFamily(
  167. self.name + "_total", self.desc, labels=self.labels
  168. )
  169. metrics_by_key = {}
  170. # We copy so that we don't mutate the list while iterating
  171. with self._lock:
  172. keys = list(self._registrations)
  173. for key in keys:
  174. with self._lock:
  175. callbacks = set(self._registrations[key])
  176. in_flight.add_metric(key, len(callbacks))
  177. metrics = self._metrics_class()
  178. metrics_by_key[key] = metrics
  179. for callback in callbacks:
  180. callback(metrics)
  181. yield in_flight
  182. for name in self.sub_metrics:
  183. gauge = GaugeMetricFamily(
  184. "_".join([self.name, name]), "", labels=self.labels
  185. )
  186. for key, metrics in metrics_by_key.items():
  187. gauge.add_metric(key, getattr(metrics, name))
  188. yield gauge
  189. def _register_with_collector(self) -> None:
  190. if self.name in all_gauges.keys():
  191. logger.warning("%s already registered, reregistering" % (self.name,))
  192. REGISTRY.unregister(all_gauges.pop(self.name))
  193. REGISTRY.register(self)
  194. all_gauges[self.name] = self
  195. class GaugeBucketCollector(Collector):
  196. """Like a Histogram, but the buckets are Gauges which are updated atomically.
  197. The data is updated by calling `update_data` with an iterable of measurements.
  198. We assume that the data is updated less frequently than it is reported to
  199. Prometheus, and optimise for that case.
  200. """
  201. __slots__ = (
  202. "_name",
  203. "_documentation",
  204. "_bucket_bounds",
  205. "_metric",
  206. )
  207. def __init__(
  208. self,
  209. name: str,
  210. documentation: str,
  211. buckets: Iterable[float],
  212. registry: CollectorRegistry = REGISTRY,
  213. ):
  214. """
  215. Args:
  216. name: base name of metric to be exported to Prometheus. (a _bucket suffix
  217. will be added.)
  218. documentation: help text for the metric
  219. buckets: The top bounds of the buckets to report
  220. registry: metric registry to register with
  221. """
  222. self._name = name
  223. self._documentation = documentation
  224. # the tops of the buckets
  225. self._bucket_bounds = [float(b) for b in buckets]
  226. if self._bucket_bounds != sorted(self._bucket_bounds):
  227. raise ValueError("Buckets not in sorted order")
  228. if self._bucket_bounds[-1] != float("inf"):
  229. self._bucket_bounds.append(float("inf"))
  230. # We initially set this to None. We won't report metrics until
  231. # this has been initialised after a successful data update
  232. self._metric: Optional[GaugeHistogramMetricFamily] = None
  233. registry.register(self)
  234. def collect(self) -> Iterable[Metric]:
  235. # Don't report metrics unless we've already collected some data
  236. if self._metric is not None:
  237. yield self._metric
  238. def update_data(self, values: Iterable[float]) -> None:
  239. """Update the data to be reported by the metric
  240. The existing data is cleared, and each measurement in the input is assigned
  241. to the relevant bucket.
  242. """
  243. self._metric = self._values_to_metric(values)
  244. def _values_to_metric(self, values: Iterable[float]) -> GaugeHistogramMetricFamily:
  245. total = 0.0
  246. bucket_values = [0 for _ in self._bucket_bounds]
  247. for v in values:
  248. # assign each value to a bucket
  249. for i, bound in enumerate(self._bucket_bounds):
  250. if v <= bound:
  251. bucket_values[i] += 1
  252. break
  253. # ... and increment the sum
  254. total += v
  255. # now, aggregate the bucket values so that they count the number of entries in
  256. # that bucket or below.
  257. accumulated_values = itertools.accumulate(bucket_values)
  258. return GaugeHistogramMetricFamily(
  259. self._name,
  260. self._documentation,
  261. buckets=list(
  262. zip((str(b) for b in self._bucket_bounds), accumulated_values)
  263. ),
  264. gsum_value=total,
  265. )
  266. #
  267. # Detailed CPU metrics
  268. #
  269. class CPUMetrics(Collector):
  270. def __init__(self) -> None:
  271. ticks_per_sec = 100
  272. try:
  273. # Try and get the system config
  274. ticks_per_sec = os.sysconf("SC_CLK_TCK")
  275. except (ValueError, TypeError, AttributeError):
  276. pass
  277. self.ticks_per_sec = ticks_per_sec
  278. def collect(self) -> Iterable[Metric]:
  279. if not HAVE_PROC_SELF_STAT:
  280. return
  281. with open("/proc/self/stat") as s:
  282. line = s.read()
  283. raw_stats = line.split(") ", 1)[1].split(" ")
  284. user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
  285. user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
  286. yield user
  287. sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
  288. sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
  289. yield sys
REGISTRY.register(CPUMetrics())

#
# Federation Metrics
#

# Number of transactions sent by the federation client.
sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "")

# Number of events processed by the federation client.
events_processed_counter = Counter("synapse_federation_client_events_processed", "")

event_processing_loop_counter = Counter(
    "synapse_event_processing_loop_count", "Event processing loop iterations", ["name"]
)

event_processing_loop_room_count = Counter(
    "synapse_event_processing_loop_room_count",
    "Rooms seen per event processing loop iteration",
    ["name"],
)

# Used to track where various components have processed in the event stream,
# e.g. federation sending, appservice sending, etc.
event_processing_positions = Gauge("synapse_event_processing_positions", "", ["name"])

# Used to track the current max events stream position
event_persisted_position = Gauge("synapse_event_persisted_position", "")

# Used to track the received_ts of the last event processed by various
# components
event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"])

# Used to track the lag processing events. This is the time difference
# between the last processed event's received_ts and the time it was
# finished being processed.
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])

event_processing_lag_by_event = Histogram(
    "synapse_event_processing_lag_by_event",
    "Time between an event being persisted and it being queued up to be sent to the relevant remote servers",
    ["name"],
)

# Build info of the running server.
build_info = Gauge(
    "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]
)
# Set once at import time; the gauge value itself is a constant 1 and the
# interesting data lives in the labels.
build_info.labels(
    " ".join([platform.python_implementation(), platform.python_version()]),
    SYNAPSE_VERSION,
    " ".join([platform.system(), platform.release()]),
).set(1)

# 3PID send info
threepid_send_requests = Histogram(
    "synapse_threepid_send_requests_with_tries",
    documentation="Number of requests for a 3pid token by try count. Note if"
    " there is a request with try count of 4, then there would have been one"
    " each for 1, 2 and 3",
    buckets=(1, 2, 3, 4, 5, 10),
    labelnames=("type", "reason"),
)

# Threadpool metrics; populated per-pool by register_threadpool() below.
threadpool_total_threads = Gauge(
    "synapse_threadpool_total_threads",
    "Total number of threads currently in the threadpool",
    ["name"],
)

threadpool_total_working_threads = Gauge(
    "synapse_threadpool_working_threads",
    "Number of threads currently working in the threadpool",
    ["name"],
)

threadpool_total_min_threads = Gauge(
    "synapse_threadpool_min_threads",
    "Minimum number of threads configured in the threadpool",
    ["name"],
)

threadpool_total_max_threads = Gauge(
    "synapse_threadpool_max_threads",
    "Maximum number of threads configured in the threadpool",
    ["name"],
)
  359. def register_threadpool(name: str, threadpool: ThreadPool) -> None:
  360. """Add metrics for the threadpool."""
  361. threadpool_total_min_threads.labels(name).set(threadpool.min)
  362. threadpool_total_max_threads.labels(name).set(threadpool.max)
  363. threadpool_total_threads.labels(name).set_function(lambda: len(threadpool.threads))
  364. threadpool_total_working_threads.labels(name).set_function(
  365. lambda: len(threadpool.working)
  366. )
# Public API of this package: collectors defined above plus re-exports from
# the private _gc and _twisted_exposition submodules.
__all__ = [
    "Collector",
    "MetricsResource",
    "generate_latest",
    "LaterGauge",
    "InFlightGauge",
    "GaugeBucketCollector",
    "MIN_TIME_BETWEEN_GCS",
    "install_gc_manager",
]