You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

255 lines
8.3 KiB

  1. # Copyright 2020 The Matrix.org Foundation C.I.C.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import logging
  15. import sys
  16. import traceback
  17. from collections import deque
  18. from ipaddress import IPv4Address, IPv6Address, ip_address
  19. from math import floor
  20. from typing import Callable, Deque, Optional
  21. import attr
  22. from zope.interface import implementer
  23. from twisted.application.internet import ClientService
  24. from twisted.internet.defer import CancelledError, Deferred
  25. from twisted.internet.endpoints import (
  26. HostnameEndpoint,
  27. TCP4ClientEndpoint,
  28. TCP6ClientEndpoint,
  29. )
  30. from twisted.internet.interfaces import (
  31. IPushProducer,
  32. IReactorTCP,
  33. IStreamClientEndpoint,
  34. )
  35. from twisted.internet.protocol import Factory, Protocol
  36. from twisted.internet.tcp import Connection
  37. from twisted.python.failure import Failure
# Module-level logger used to report this handler's own internal failures.
logger = logging.getLogger(__name__)
  39. @attr.s(slots=True, auto_attribs=True)
  40. @implementer(IPushProducer)
  41. class LogProducer:
  42. """
  43. An IPushProducer that writes logs from its buffer to its transport when it
  44. is resumed.
  45. Args:
  46. buffer: Log buffer to read logs from.
  47. transport: Transport to write to.
  48. format: A callable to format the log record to a string.
  49. """
  50. # This is essentially ITCPTransport, but that is missing certain fields
  51. # (connected and registerProducer) which are part of the implementation.
  52. transport: Connection
  53. _format: Callable[[logging.LogRecord], str]
  54. _buffer: Deque[logging.LogRecord]
  55. _paused: bool = attr.ib(default=False, init=False)
  56. def pauseProducing(self) -> None:
  57. self._paused = True
  58. def stopProducing(self) -> None:
  59. self._paused = True
  60. self._buffer = deque()
  61. def resumeProducing(self) -> None:
  62. # If we're already producing, nothing to do.
  63. self._paused = False
  64. # Loop until paused.
  65. while self._paused is False and (self._buffer and self.transport.connected):
  66. try:
  67. # Request the next record and format it.
  68. record = self._buffer.popleft()
  69. msg = self._format(record)
  70. # Send it as a new line over the transport.
  71. self.transport.write(msg.encode("utf8"))
  72. self.transport.write(b"\n")
  73. except Exception:
  74. # Something has gone wrong writing to the transport -- log it
  75. # and break out of the while.
  76. traceback.print_exc(file=sys.__stderr__)
  77. break
  78. class RemoteHandler(logging.Handler):
  79. """
  80. An logging handler that writes logs to a TCP target.
  81. Args:
  82. host: The host of the logging target.
  83. port: The logging target's port.
  84. maximum_buffer: The maximum buffer size.
  85. """
  86. def __init__(
  87. self,
  88. host: str,
  89. port: int,
  90. maximum_buffer: int = 1000,
  91. level: int = logging.NOTSET,
  92. _reactor: Optional[IReactorTCP] = None,
  93. ):
  94. super().__init__(level=level)
  95. self.host = host
  96. self.port = port
  97. self.maximum_buffer = maximum_buffer
  98. self._buffer: Deque[logging.LogRecord] = deque()
  99. self._connection_waiter: Optional[Deferred] = None
  100. self._producer: Optional[LogProducer] = None
  101. # Connect without DNS lookups if it's a direct IP.
  102. if _reactor is None:
  103. from twisted.internet import reactor
  104. _reactor = reactor # type: ignore[assignment]
  105. try:
  106. ip = ip_address(self.host)
  107. if isinstance(ip, IPv4Address):
  108. endpoint: IStreamClientEndpoint = TCP4ClientEndpoint(
  109. _reactor, self.host, self.port
  110. )
  111. elif isinstance(ip, IPv6Address):
  112. endpoint = TCP6ClientEndpoint(_reactor, self.host, self.port)
  113. else:
  114. raise ValueError("Unknown IP address provided: %s" % (self.host,))
  115. except ValueError:
  116. endpoint = HostnameEndpoint(_reactor, self.host, self.port)
  117. factory = Factory.forProtocol(Protocol)
  118. self._service = ClientService(endpoint, factory, clock=_reactor)
  119. self._service.startService()
  120. self._stopping = False
  121. self._connect()
  122. def close(self) -> None:
  123. self._stopping = True
  124. self._service.stopService()
  125. def _connect(self) -> None:
  126. """
  127. Triggers an attempt to connect then write to the remote if not already writing.
  128. """
  129. # Do not attempt to open multiple connections.
  130. if self._connection_waiter:
  131. return
  132. def fail(failure: Failure) -> None:
  133. # If the Deferred was cancelled (e.g. during shutdown) do not try to
  134. # reconnect (this will cause an infinite loop of errors).
  135. if failure.check(CancelledError) and self._stopping:
  136. return
  137. # For a different error, print the traceback and re-connect.
  138. failure.printTraceback(file=sys.__stderr__)
  139. self._connection_waiter = None
  140. self._connect()
  141. def writer(result: Protocol) -> None:
  142. # Force recognising transport as a Connection and not the more
  143. # generic ITransport.
  144. transport: Connection = result.transport # type: ignore
  145. # We have a connection. If we already have a producer, and its
  146. # transport is the same, just trigger a resumeProducing.
  147. if self._producer and transport is self._producer.transport:
  148. self._producer.resumeProducing()
  149. self._connection_waiter = None
  150. return
  151. # If the producer is still producing, stop it.
  152. if self._producer:
  153. self._producer.stopProducing()
  154. # Make a new producer and start it.
  155. self._producer = LogProducer(
  156. buffer=self._buffer,
  157. transport=transport,
  158. format=self.format,
  159. )
  160. transport.registerProducer(self._producer, True)
  161. self._producer.resumeProducing()
  162. self._connection_waiter = None
  163. deferred: Deferred = self._service.whenConnected(failAfterFailures=1)
  164. deferred.addCallbacks(writer, fail)
  165. self._connection_waiter = deferred
  166. def _handle_pressure(self) -> None:
  167. """
  168. Handle backpressure by shedding records.
  169. The buffer will, in this order, until the buffer is below the maximum:
  170. - Shed DEBUG records.
  171. - Shed INFO records.
  172. - Shed the middle 50% of the records.
  173. """
  174. if len(self._buffer) <= self.maximum_buffer:
  175. return
  176. # Strip out DEBUGs
  177. self._buffer = deque(
  178. filter(lambda record: record.levelno > logging.DEBUG, self._buffer)
  179. )
  180. if len(self._buffer) <= self.maximum_buffer:
  181. return
  182. # Strip out INFOs
  183. self._buffer = deque(
  184. filter(lambda record: record.levelno > logging.INFO, self._buffer)
  185. )
  186. if len(self._buffer) <= self.maximum_buffer:
  187. return
  188. # Cut the middle entries out
  189. buffer_split = floor(self.maximum_buffer / 2)
  190. old_buffer = self._buffer
  191. self._buffer = deque()
  192. for _ in range(buffer_split):
  193. self._buffer.append(old_buffer.popleft())
  194. end_buffer = []
  195. for _ in range(buffer_split):
  196. end_buffer.append(old_buffer.pop())
  197. self._buffer.extend(reversed(end_buffer))
  198. def emit(self, record: logging.LogRecord) -> None:
  199. self._buffer.append(record)
  200. # Handle backpressure, if it exists.
  201. try:
  202. self._handle_pressure()
  203. except Exception:
  204. # If handling backpressure fails, clear the buffer and log the
  205. # exception.
  206. self._buffer.clear()
  207. logger.warning("Failed clearing backpressure")
  208. # Try and write immediately.
  209. self._connect()