# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import urllib.parse
from abc import ABC, abstractmethod
from types import TracebackType
from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type

import attr

from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender
from twisted.web.server import Request

from synapse.api.errors import Codes, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.util.stringutils import is_ascii

logger = logging.getLogger(__name__)

# list all text content types that will have the charset default to UTF-8 when
# none is given
TEXT_CONTENT_TYPES = [
    "text/css",
    "text/csv",
    "text/html",
    "text/calendar",
    "text/plain",
    "text/javascript",
    "application/json",
    "application/ld+json",
    "application/rtf",
    "image/svg+xml",
    "text/xml",
]

# A list of all content types that are "safe" to be rendered inline in a browser.
INLINE_CONTENT_TYPES = [
    "text/css",
    "text/plain",
    "text/csv",
    "application/json",
    "application/ld+json",
    # We allow some media files deemed as safe, which comes from the matrix-react-sdk.
    # https://github.com/matrix-org/matrix-react-sdk/blob/a70fcfd0bcf7f8c85986da18001ea11597989a7c/src/utils/blobs.ts#L51
    # SVGs are *intentionally* omitted.
    "image/jpeg",
    "image/gif",
    "image/png",
    "image/apng",
    "image/webp",
    "image/avif",
    "video/mp4",
    "video/webm",
    "video/ogg",
    "video/quicktime",
    "audio/mp4",
    "audio/webm",
    "audio/aac",
    "audio/mpeg",
    "audio/ogg",
    "audio/wave",
    "audio/wav",
    "audio/x-wav",
    "audio/x-pn-wav",
    "audio/flac",
    "audio/x-flac",
]

# Default timeout_ms for download and thumbnail requests
DEFAULT_MAX_TIMEOUT_MS = 20_000

# Maximum allowed timeout_ms for download and thumbnail requests
MAXIMUM_ALLOWED_MAX_TIMEOUT_MS = 60_000


def respond_404(request: SynapseRequest) -> None:
    assert request.path is not None
    respond_with_json(
        request,
        404,
        cs_error("Not found '%s'" % (request.path.decode(),), code=Codes.NOT_FOUND),
        send_cors=True,
    )


async def respond_with_file(
    request: SynapseRequest,
    media_type: str,
    file_path: str,
    file_size: Optional[int] = None,
    upload_name: Optional[str] = None,
) -> None:
    logger.debug("Responding with %r", file_path)

    if os.path.isfile(file_path):
        if file_size is None:
            stat = os.stat(file_path)
            file_size = stat.st_size

        add_file_headers(request, media_type, file_size, upload_name)
        with open(file_path, "rb") as f:
            await make_deferred_yieldable(FileSender().beginFileTransfer(f, request))

        finish_request(request)
    else:
        respond_404(request)


def add_file_headers(
    request: Request,
    media_type: str,
    file_size: Optional[int],
    upload_name: Optional[str],
) -> None:
    """Adds the correct response headers in preparation for responding with the
    media.

    Args:
        request
        media_type: The media/content type.
        file_size: Size in bytes of the media, if known.
        upload_name: The name of the requested file, if any.
    """

    def _quote(x: str) -> str:
        return urllib.parse.quote(x.encode("utf-8"))

    # Default to a UTF-8 charset for text content types.
    # e.g. uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16'
    if media_type.lower() in TEXT_CONTENT_TYPES:
        content_type = media_type + "; charset=UTF-8"
    else:
        content_type = media_type

    request.setHeader(b"Content-Type", content_type.encode("UTF-8"))

    # A strict subset of content types is allowed to be inlined so that they may
    # be viewed directly in a browser. Other file types are forced to be downloads.
    #
    # Only the type & subtype are important, parameters can be ignored.
    if media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES:
        disposition = "inline"
    else:
        disposition = "attachment"

    if upload_name:
        # RFC6266 section 4.1 [1] defines both `filename` and `filename*`.
        #
        # `filename` is defined to be a `value`, which is defined by RFC2616
        # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
        # is (essentially) a single US-ASCII word, and a `quoted-string` is a
        # US-ASCII string surrounded by double-quotes, using backslash as an
        # escape character. Note that %-encoding is *not* permitted.
        #
        # `filename*` is defined to be an `ext-value`, which is defined in
        # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
        # where `value-chars` is essentially a %-encoded string in the given charset.
        #
        # [1]: https://tools.ietf.org/html/rfc6266#section-4.1
        # [2]: https://tools.ietf.org/html/rfc2616#section-3.6
        # [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1

        # We avoid the quoted-string version of `filename`, because (a) synapse didn't
        # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we
        # may as well just do the filename* version.
        if _can_encode_filename_as_token(upload_name):
            disposition = "%s; filename=%s" % (
                disposition,
                upload_name,
            )
        else:
            disposition = "%s; filename*=utf-8''%s" % (
                disposition,
                _quote(upload_name),
            )

    request.setHeader(b"Content-Disposition", disposition.encode("ascii"))

    # cache for at least a day.
    # XXX: we might want to turn this off for data we don't want to
    # recommend caching as it's sensitive or private - or at least
    # select private. don't bother setting Expires as all our
    # clients are smart enough to be happy with Cache-Control
    request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")

    if file_size is not None:
        request.setHeader(b"Content-Length", b"%d" % (file_size,))

    # Tell web crawlers to not index, archive, or follow links in media. This
    # should help to prevent things in the media repo from showing up in web
    # search results.
    request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex")


# separators as defined in RFC2616. SP and HT are handled separately.
# see _can_encode_filename_as_token.
_FILENAME_SEPARATOR_CHARS = {
    "(",
    ")",
    "<",
    ">",
    "@",
    ",",
    ";",
    ":",
    "\\",
    '"',
    "/",
    "[",
    "]",
    "?",
    "=",
    "{",
    "}",
}


def _can_encode_filename_as_token(x: str) -> bool:
    for c in x:
        # from RFC2616:
        #
        #     token          = 1*<any CHAR except CTLs or separators>
        #
        #     separators     = "(" | ")" | "<" | ">" | "@"
        #                    | "," | ";" | ":" | "\" | <">
        #                    | "/" | "[" | "]" | "?" | "="
        #                    | "{" | "}" | SP | HT
        #
        #     CHAR           = <any US-ASCII character (octets 0 - 127)>
        #
        #     CTL            = <any US-ASCII control character
        #                      (octets 0 - 31) and DEL (127)>
        #
        if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS:
            return False
    return True
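

# e.g. "report-2021.txt" can be sent as a bare token, while "weekly report.txt"
# (contains a space) or "résumé.pdf" (non-ASCII) cannot, so add_file_headers
# falls back to the filename* form for those.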


async def respond_with_responder(
    request: SynapseRequest,
    responder: "Optional[Responder]",
    media_type: str,
    file_size: Optional[int],
    upload_name: Optional[str] = None,
) -> None:
    """Responds to the request with the given responder. If the responder is
    None then a 404 is returned.

    Args:
        request
        responder
        media_type: The media/content type.
        file_size: Size in bytes of the media. If not known it should be None.
        upload_name: The name of the requested file, if any.
    """
    if not responder:
        respond_404(request)
        return

    # If we have a responder we *must* use it as a context manager.
    with responder:
        if request._disconnected:
            logger.warning(
                "Not sending response to request %s, already disconnected.", request
            )
            return

        logger.debug("Responding to media request with responder %s", responder)
        add_file_headers(request, media_type, file_size, upload_name)
        try:
            await responder.write_to_consumer(request)
        except Exception as e:
            # The majority of the time this will be due to the client having gone
            # away. Unfortunately, Twisted simply throws a generic exception at us
            # in that case.
            logger.warning("Failed to write to consumer: %s %s", type(e), e)

            # Unregister the producer, if it has one, so Twisted doesn't complain
            if request.producer:
                request.unregisterProducer()

    finish_request(request)


class Responder(ABC):
    """Represents a response that can be streamed to the requester.

    Responder is a context manager which *must* be used, so that any resources
    held can be cleaned up.
    """

    @abstractmethod
    def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
        """Stream response into consumer

        Args:
            consumer: The consumer to stream into.

        Returns:
            Resolves once the response has finished being written
        """
        raise NotImplementedError()

    def __enter__(self) -> None:  # noqa: B027
        pass

    def __exit__(  # noqa: B027
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        pass
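

# A concrete Responder typically wraps an already-open file or an in-progress
# download. As a rough, hypothetical sketch (nothing in this module relies on
# it), a file-backed responder could look like:
#
#     class FileResponder(Responder):
#         def __init__(self, open_file: BinaryIO) -> None:
#             self.open_file = open_file
#
#         def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
#             # Stream the file into the consumer, resolving when done.
#             return make_deferred_yieldable(
#                 FileSender().beginFileTransfer(self.open_file, consumer)
#             )
#
#         def __exit__(self, exc_type, exc_val, exc_tb) -> None:
#             # Release the file handle once the response has been sent.
#             self.open_file.close()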


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ThumbnailInfo:
    """Details about a generated thumbnail."""

    width: int
    height: int
    method: str
    # Content type of thumbnail, e.g. image/png
    type: str
    # The size of the media file, in bytes.
    length: int


@attr.s(slots=True, frozen=True, auto_attribs=True)
class FileInfo:
    """Details about a requested/uploaded file."""

    # The server name where the media originated from, or None if local.
    server_name: Optional[str]
    # The local ID of the file. For local files this is the same as the media_id
    file_id: str
    # If the file is for the url preview cache
    url_cache: bool = False
    # Whether the file is a thumbnail or not.
    thumbnail: Optional[ThumbnailInfo] = None

    # The below properties exist to maintain compatibility with third-party modules.
    @property
    def thumbnail_width(self) -> Optional[int]:
        if not self.thumbnail:
            return None
        return self.thumbnail.width

    @property
    def thumbnail_height(self) -> Optional[int]:
        if not self.thumbnail:
            return None
        return self.thumbnail.height

    @property
    def thumbnail_method(self) -> Optional[str]:
        if not self.thumbnail:
            return None
        return self.thumbnail.method

    @property
    def thumbnail_type(self) -> Optional[str]:
        if not self.thumbnail:
            return None
        return self.thumbnail.type

    @property
    def thumbnail_length(self) -> Optional[int]:
        if not self.thumbnail:
            return None
        return self.thumbnail.length


def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]:
    """
    Get the filename of the downloaded file by inspecting the
    Content-Disposition HTTP header.

    Args:
        headers: The HTTP request headers.

    Returns:
        The filename, or None.
    """
    content_disposition = headers.get(b"Content-Disposition", [b""])

    # No header, bail out.
    if not content_disposition[0]:
        return None

    _, params = _parse_header(content_disposition[0])

    upload_name = None

    # First check if there is a valid UTF-8 filename
    upload_name_utf8 = params.get(b"filename*", None)
    if upload_name_utf8:
        if upload_name_utf8.lower().startswith(b"utf-8''"):
            upload_name_utf8 = upload_name_utf8[7:]
            # We have a filename*= section. This MUST be ASCII, and any UTF-8
            # bytes are %-quoted.
            try:
                # Once it is decoded, we can then unquote the %-encoded
                # parts strictly into a unicode string.
                upload_name = urllib.parse.unquote(
                    upload_name_utf8.decode("ascii"), errors="strict"
                )
            except UnicodeDecodeError:
                # Incorrect UTF-8.
                pass

    # If there isn't one, check for an ASCII name.
    if not upload_name:
        upload_name_ascii = params.get(b"filename", None)
        if upload_name_ascii and is_ascii(upload_name_ascii):
            upload_name = upload_name_ascii.decode("ascii")

    # This may be None here, indicating we did not find a matching name.
    return upload_name
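

# e.g. a response carrying
#
#     Content-Disposition: attachment; filename*=utf-8''%C3%A9.txt
#
# yields "é.txt", while
#
#     Content-Disposition: attachment; filename="foo.txt"
#
# yields "foo.txt".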


def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]:
    """Parse a Content-type like header.

    Cargo-culted from `cgi`, but works on bytes rather than strings.

    Args:
        line: header to be parsed

    Returns:
        The main content-type, followed by the parameter dictionary
    """
    parts = _parseparam(b";" + line)
    key = next(parts)
    pdict = {}
    for p in parts:
        i = p.find(b"=")
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i + 1 :].strip()

            # strip double-quotes
            if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
                value = value[1:-1]
                value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"')
            pdict[name] = value

    return key, pdict
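

# e.g. _parse_header(b'attachment; filename="spam.png"; size=100') returns
# (b"attachment", {b"filename": b"spam.png", b"size": b"100"}).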


def _parseparam(s: bytes) -> Generator[bytes, None, None]:
    """Generator which splits the input on ;, respecting double-quoted sequences

    Cargo-culted from `cgi`, but works on bytes rather than strings.

    Args:
        s: header to be parsed

    Returns:
        The split input
    """
    while s[:1] == b";":
        s = s[1:]

        # look for the next ;
        end = s.find(b";")

        # if there is an odd number of " marks between here and the next ;, skip to the
        # next ; instead
        while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
            end = s.find(b";", end + 1)

        if end < 0:
            end = len(s)

        f = s[:end]
        yield f.strip()

        s = s[end:]