# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import logging
import logging.config
import os
import sys
import threading
from string import Template
from typing import TYPE_CHECKING, Any, Dict, Optional

import yaml
from zope.interface import implementer

from twisted.logger import (
    ILogObserver,
    LogBeginner,
    STDLibLogObserver,
    eventAsText,
    globalLogBeginner,
)

from synapse.logging.context import LoggingContextFilter
from synapse.logging.filter import MetadataFilter
from synapse.synapse_rust import reset_logging_config
from synapse.types import JsonDict

from ..util import SYNAPSE_VERSION
from ._base import Config, ConfigError

if TYPE_CHECKING:
    from synapse.config.homeserver import HomeServerConfig
    from synapse.server import HomeServer

DEFAULT_LOG_CONFIG = Template(
    """\
# Log configuration for Synapse.
#
# This is a YAML file containing a standard Python logging configuration
# dictionary. See [1] for details on the valid settings.
#
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html

version: 1

formatters:
    precise:
        format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - \
%(request)s - %(message)s'

handlers:
    file:
        class: logging.handlers.TimedRotatingFileHandler
        formatter: precise
        filename: ${log_file}
        when: midnight
        backupCount: 3  # Does not include the current log file.
        encoding: utf8

    # Default to buffering writes to log file for efficiency.
    # WARNING/ERROR logs will still be flushed immediately, but there will be a
    # delay (of up to `period` seconds, or until the buffer is full with
    # `capacity` messages) before INFO/DEBUG logs get written.
    buffer:
        class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler
        target: file

        # The capacity is the maximum number of log lines that are buffered
        # before being written to disk. Increasing this will lead to better
        # performance, at the expense of it taking longer for log lines to
        # be written to disk.
        # This parameter is required.
        capacity: 10

        # Logs with a level at or above the flush level will cause the buffer to
        # be flushed immediately.
        # Default value: 40 (ERROR)
        # Other values: 50 (CRITICAL), 30 (WARNING), 20 (INFO), 10 (DEBUG)
        flushLevel: 30  # Flush immediately for WARNING logs and higher

        # The period of time, in seconds, between forced flushes.
        # Messages will not be delayed for longer than this time.
        # Default value: 5 seconds
        period: 5

    # A handler that writes logs to stderr. Unused by default, but can be used
    # instead of "buffer" and "file" in the logger handlers.
    console:
        class: logging.StreamHandler
        formatter: precise

loggers:
    synapse.storage.SQL:
        # beware: increasing this to DEBUG will make synapse log sensitive
        # information such as access tokens.
        level: INFO

root:
    level: INFO

    # Write logs to the `buffer` handler, which will buffer them together in memory,
    # then write them to a file.
    #
    # Replace "buffer" with "console" to log to stderr instead.
    #
    handlers: [buffer]

disable_existing_loggers: false
"""
)
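
# Illustrative note (not part of the original file): the template above is only a
# string; it becomes an active logging configuration once rendered with a concrete
# log file path and passed through the stdlib dictConfig machinery, roughly:
#
#     rendered = DEFAULT_LOG_CONFIG.substitute(log_file="/path/to/homeserver.log")
#     logging.config.dictConfig(yaml.safe_load(rendered))
#
# which mirrors what generate_files() and _load_logging_config() below do.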

LOG_FILE_ERROR = """\
Support for the log_file configuration option and --log-file command-line option was
removed in Synapse 1.3.0. You should instead set up a separate log configuration file.
"""

STRUCTURED_ERROR = """\
Support for the structured configuration option was removed in Synapse 1.54.0.
You should instead use the standard logging configuration. See
https://matrix-org.github.io/synapse/v1.54/structured_logging.html
"""


class LoggingConfig(Config):
    section = "logging"

    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        if config.get("log_file"):
            raise ConfigError(LOG_FILE_ERROR)
        self.log_config = self.abspath(config.get("log_config"))
        self.no_redirect_stdio = config.get("no_redirect_stdio", False)

    def generate_config_section(
        self, config_dir_path: str, server_name: str, **kwargs: Any
    ) -> str:
        log_config = os.path.join(config_dir_path, server_name + ".log.config")
        return (
            """\
        log_config: "%(log_config)s"
        """
            % locals()
        )

    def read_arguments(self, args: argparse.Namespace) -> None:
        if args.no_redirect_stdio is not None:
            self.no_redirect_stdio = args.no_redirect_stdio
        if args.log_file is not None:
            raise ConfigError(LOG_FILE_ERROR)

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser) -> None:
        logging_group = parser.add_argument_group("logging")
        logging_group.add_argument(
            "-n",
            "--no-redirect-stdio",
            action="store_true",
            default=None,
            help="Do not redirect stdout/stderr to the log",
        )

        logging_group.add_argument(
            "-f",
            "--log-file",
            dest="log_file",
            help=argparse.SUPPRESS,
        )

    def generate_files(self, config: Dict[str, Any], config_dir_path: str) -> None:
        log_config = config.get("log_config")
        if log_config and not os.path.exists(log_config):
            log_file = self.abspath("homeserver.log")
            print(
                "Generating log config file %s which will log to %s"
                % (log_config, log_file)
            )
            with open(log_config, "w") as log_config_file:
                log_config_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=log_file))
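
# Illustrative example (assumed values, not in the original file): for server_name
# "example.com" and config_dir_path "/etc/synapse", generate_config_section() above
# emits the homeserver.yaml snippet
#
#     log_config: "/etc/synapse/example.com.log.config"
#
# and generate_files() then writes the rendered DEFAULT_LOG_CONFIG to that path if
# the file does not already exist.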


def _setup_stdlib_logging(
    config: "HomeServerConfig", log_config_path: Optional[str], logBeginner: LogBeginner
) -> None:
    """
    Set up Python standard library logging.
    """

    # We add a log record factory that runs all messages through the
    # LoggingContextFilter so that we get the context *at the time we log*
    # rather than when we write to a handler. This can be done in config using
    # filter options, but care must be taken when using e.g. MemoryHandler to
    # buffer writes.

    log_context_filter = LoggingContextFilter()
    log_metadata_filter = MetadataFilter({"server_name": config.server.server_name})
    old_factory = logging.getLogRecordFactory()

    def factory(*args: Any, **kwargs: Any) -> logging.LogRecord:
        record = old_factory(*args, **kwargs)
        log_context_filter.filter(record)
        log_metadata_filter.filter(record)
        return record

    logging.setLogRecordFactory(factory)

    # Configure the logger with the initial configuration.
    if log_config_path is None:
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s"
        )

        logger = logging.getLogger("")
        logger.setLevel(logging.INFO)
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        formatter = logging.Formatter(log_format)

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    else:
        # Load the logging configuration.
        _load_logging_config(log_config_path)

    # Route Twisted's native logging through to the standard library logging
    # system.
    observer = STDLibLogObserver()

    threadlocal = threading.local()

    @implementer(ILogObserver)
    def _log(event: dict) -> None:
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        # this is a workaround to make sure we don't get stack overflows when the
        # logging system raises an error which is written to stderr which is redirected
        # to the logging system, etc.
        if getattr(threadlocal, "active", False):
            # write the text of the event, if any, to the *real* stderr (which may
            # be redirected to /dev/null, but there's not much we can do)
            try:
                event_text = eventAsText(event)
                print("logging during logging: %s" % event_text, file=sys.__stderr__)
            except Exception:
                # gah.
                pass
            return

        try:
            threadlocal.active = True
            return observer(event)
        finally:
            threadlocal.active = False

    logBeginner.beginLoggingTo([_log], redirectStandardIO=False)


def _load_logging_config(log_config_path: str) -> None:
    """
    Configure logging from a log config path.
    """
    with open(log_config_path, "rb") as f:
        log_config = yaml.safe_load(f.read())

    if not log_config:
        logging.warning("Loaded a blank logging config?")

    # If the old structured logging configuration is being used, raise an error.
    if "structured" in log_config and log_config.get("structured"):
        raise ConfigError(STRUCTURED_ERROR)

    logging.config.dictConfig(log_config)

    # Blow away the pyo3-log cache so that it reloads the configuration.
    reset_logging_config()


def _reload_logging_config(log_config_path: Optional[str]) -> None:
    """
    Reload the log configuration from the file and apply it.
    """
    # If no log config path was given, it cannot be reloaded.
    if log_config_path is None:
        return

    _load_logging_config(log_config_path)
    logging.info("Reloaded log config from %s due to SIGHUP", log_config_path)


def setup_logging(
    hs: "HomeServer",
    config: "HomeServerConfig",
    use_worker_options: bool = False,
    logBeginner: LogBeginner = globalLogBeginner,
) -> None:
    """
    Set up the logging subsystem.

    Args:
        config: configuration data

        use_worker_options: True to use the 'worker_log_config' option
            instead of 'log_config'.

        logBeginner: The Twisted logBeginner to use.
    """
    from twisted.internet import reactor

    log_config_path = (
        config.worker.worker_log_config
        if use_worker_options
        else config.logging.log_config
    )

    # Perform one-time logging configuration.
    _setup_stdlib_logging(config, log_config_path, logBeginner=logBeginner)
    # Add a SIGHUP handler to reload the logging configuration, if one is available.
    from synapse.app import _base as appbase

    appbase.register_sighup(_reload_logging_config, log_config_path)

    # Log immediately so we can grep backwards.
    logging.warning("***** STARTING SERVER *****")
    logging.warning(
        "Server %s version %s",
        sys.argv[0],
        SYNAPSE_VERSION,
    )
    logging.info("Server hostname: %s", config.server.server_name)
    logging.info("Instance name: %s", hs.get_instance_name())
    logging.info("Twisted reactor: %s", type(reactor).__name__)