# -*- coding: utf-8 -*-
# Copyright 2014 matrix.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains classes for streaming from the event stream: /events.
"""
from twisted.internet import defer

from synapse.api.errors import EventStreamError
from synapse.api.events.room import (
    RoomMemberEvent, MessageEvent, FeedbackEvent, RoomTopicEvent
)
from synapse.api.streams import PaginationStream, StreamData

import logging

logger = logging.getLogger(__name__)
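
# Tokens handled by EventStream are SEPARATOR ('_') joined per-stream keys,
# one segment per StreamData source, e.g. "12_3_0_7" when four sources are
# multiplexed (the example values are illustrative). The magic segment value
# -1 means "latest" and is resolved by EventStream.fix_token().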


class MessagesStreamData(StreamData):
    EVENT_TYPE = MessageEvent.TYPE

    def __init__(self, hs, room_id=None, feedback=False):
        super(MessagesStreamData, self).__init__(hs)
        self.room_id = room_id
        self.with_feedback = feedback

    @defer.inlineCallbacks
    def get_rows(self, user_id, from_key, to_key, limit):
        (data, latest_ver) = yield self.store.get_message_stream(
            user_id=user_id,
            from_key=from_key,
            to_key=to_key,
            limit=limit,
            room_id=self.room_id,
            with_feedback=self.with_feedback
        )
        defer.returnValue((data, latest_ver))

    @defer.inlineCallbacks
    def max_token(self):
        val = yield self.store.get_max_message_id()
        defer.returnValue(val)


class RoomMemberStreamData(StreamData):
    EVENT_TYPE = RoomMemberEvent.TYPE

    @defer.inlineCallbacks
    def get_rows(self, user_id, from_key, to_key, limit):
        # NB: `limit` is accepted for interface parity with the other streams
        # but is not passed to the store for membership events.
        (data, latest_ver) = yield self.store.get_room_member_stream(
            user_id=user_id,
            from_key=from_key,
            to_key=to_key
        )
        defer.returnValue((data, latest_ver))

    @defer.inlineCallbacks
    def max_token(self):
        val = yield self.store.get_max_room_member_id()
        defer.returnValue(val)


class FeedbackStreamData(StreamData):
    EVENT_TYPE = FeedbackEvent.TYPE

    def __init__(self, hs, room_id=None):
        super(FeedbackStreamData, self).__init__(hs)
        self.room_id = room_id

    @defer.inlineCallbacks
    def get_rows(self, user_id, from_key, to_key, limit):
        (data, latest_ver) = yield self.store.get_feedback_stream(
            user_id=user_id,
            from_key=from_key,
            to_key=to_key,
            limit=limit,
            room_id=self.room_id
        )
        defer.returnValue((data, latest_ver))

    @defer.inlineCallbacks
    def max_token(self):
        val = yield self.store.get_max_feedback_id()
        defer.returnValue(val)


class RoomDataStreamData(StreamData):
    EVENT_TYPE = RoomTopicEvent.TYPE  # TODO need multiple event types

    def __init__(self, hs, room_id=None):
        super(RoomDataStreamData, self).__init__(hs)
        self.room_id = room_id

    @defer.inlineCallbacks
    def get_rows(self, user_id, from_key, to_key, limit):
        (data, latest_ver) = yield self.store.get_room_data_stream(
            user_id=user_id,
            from_key=from_key,
            to_key=to_key,
            limit=limit,
            room_id=self.room_id
        )
        defer.returnValue((data, latest_ver))

    @defer.inlineCallbacks
    def max_token(self):
        val = yield self.store.get_max_room_data_id()
        defer.returnValue(val)
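
# EventStream below multiplexes these StreamData sources: token segment i is
# consumed by stream_data[i], so callers must construct the stream with the
# StreamData instances in a consistent order between requests.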


class EventStream(PaginationStream):

    SEPARATOR = '_'

    def __init__(self, user_id, stream_data_list):
        super(EventStream, self).__init__()
        self.user_id = user_id
        self.stream_data = stream_data_list

    @defer.inlineCallbacks
    def fix_tokens(self, pagination_config):
        pagination_config.from_tok = yield self.fix_token(
            pagination_config.from_tok)
        pagination_config.to_tok = yield self.fix_token(
            pagination_config.to_tok)
        defer.returnValue(pagination_config)

    @defer.inlineCallbacks
    def fix_token(self, token):
        """Fixes unknown values in a token to known values.

        Args:
            token (str): The token to fix up.
        Returns:
            The fixed-up token, which may == token.
        """
        # replace TOK_START and TOK_END with 0_0_0 or -1_-1_-1 depending.
        replacements = [
            (PaginationStream.TOK_START, "0"),
            (PaginationStream.TOK_END, "-1")
        ]
        for magic_token, key in replacements:
            if magic_token == token:
                token = EventStream.SEPARATOR.join(
                    [key] * len(self.stream_data)
                )

        # replace -1 values with an actual pkey
        token_segments = self._split_token(token)
        for i, tok in enumerate(token_segments):
            if tok == -1:
                # add 1 to the max token because results are EXCLUSIVE from
                # the latest version.
                token_segments[i] = 1 + (yield self.stream_data[i].max_token())
        defer.returnValue(EventStream.SEPARATOR.join(
            str(x) for x in token_segments
        ))
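
    # Illustrative example (values made up): with two StreamData sources,
    # TOK_START is rewritten to "0_0" and TOK_END to "-1_-1"; each -1 is then
    # replaced by that stream's max_token() + 1, giving e.g. "15_4" when the
    # current maxima are 14 and 3.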

    @defer.inlineCallbacks
    def get_chunk(self, config=None):
        # no support for limit on >1 streams, makes no sense.
        if config.limit and len(self.stream_data) > 1:
            raise EventStreamError(
                400, "Limit not supported on multiplexed streams."
            )

        (chunk_data, next_tok) = yield self._get_chunk_data(config.from_tok,
                                                            config.to_tok,
                                                            config.limit)

        defer.returnValue({
            "chunk": chunk_data,
            "start": config.from_tok,
            "end": next_tok
        })

    @defer.inlineCallbacks
    def _get_chunk_data(self, from_tok, to_tok, limit):
        """ Get event data between the two tokens.

        Tokens are SEPARATOR separated values representing pkey values of
        certain tables, and the position determines the StreamData invoked
        according to the STREAM_DATA list.

        The magic value '-1' can be used to get the latest value.

        Args:
            from_tok - The token to start from.
            to_tok - The token to end at. Must have values > from_tok or be -1.
        Returns:
            A list of event data.
        Raises:
            EventStreamError if something went wrong.
        """
        # sanity check
        if (from_tok.count(EventStream.SEPARATOR) !=
                to_tok.count(EventStream.SEPARATOR) or
                (from_tok.count(EventStream.SEPARATOR) + 1) !=
                len(self.stream_data)):
            raise EventStreamError(400, "Token lengths don't match.")

        chunk = []
        next_ver = []
        for i, (from_pkey, to_pkey) in enumerate(zip(
            self._split_token(from_tok),
            self._split_token(to_tok)
        )):
            if from_pkey == to_pkey:
                # tokens are the same, we have nothing to do.
                next_ver.append(str(to_pkey))
                continue

            (event_chunk, max_pkey) = yield self.stream_data[i].get_rows(
                self.user_id, from_pkey, to_pkey, limit
            )

            chunk += event_chunk
            next_ver.append(str(max_pkey))

        defer.returnValue((chunk, EventStream.SEPARATOR.join(next_ver)))

    def _split_token(self, token):
        """Splits the given token into a list of pkeys.

        Args:
            token (str): The token with SEPARATOR values.
        Returns:
            A list of ints.
        """
        segments = token.split(EventStream.SEPARATOR)
        try:
            int_segments = [int(x) for x in segments]
        except ValueError:
            raise EventStreamError(400, "Bad token: %s" % token)
        return int_segments
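

# Illustrative usage sketch (assumes a homeserver object `hs` and a pagination
# config exposing from_tok, to_tok and limit; these names are stand-ins
# supplied by the caller):
#
#     stream = EventStream(user_id, [
#         MessagesStreamData(hs),
#         RoomMemberStreamData(hs),
#         FeedbackStreamData(hs),
#         RoomDataStreamData(hs),
#     ])
#     config = yield stream.fix_tokens(pagination_config)
#     result = yield stream.get_chunk(config)
#     # result == {"chunk": [...], "start": <from token>, "end": <next token>}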