utils.py

# Part of Odoo. See LICENSE file for full copyright and licensing details.
from html.parser import HTMLParser

from odoo.http import FilesystemSessionStore
from odoo.tools._vendor.sessions import SessionStore


# In-memory stand-in for the GeoIP resolver: every IP resolves to an
# empty record, which is enough for tests that only need the interface.
class MemoryGeoipResolver:
    def resolve(self, ip):
        return {}


# Dict-backed session store, so tests never touch the filesystem.
class MemorySessionStore(SessionStore):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.store = {}

    def get(self, sid):
        # Return the stored session, or a fresh one when the sid is unknown.
        session = self.store.get(sid)
        if not session:
            session = self.new()
        return session

    def save(self, session):
        self.store[session.sid] = session

    def delete(self, session):
        self.store.pop(session.sid, None)

    def rotate(self, session, env):
        # Reuse the filesystem store's sid-rotation logic on this store.
        FilesystemSessionStore.rotate(self, session, env)

    def vacuum(self):
        # Nothing to clean up for an in-memory store.
        return
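
# Usage sketch (illustrative, not part of the original file; assumes, like
# werkzeug, that the vendored SessionStore accepts a ``session_class`` and
# that ``new()`` returns a session carrying a freshly generated ``sid``):
#
#     store = MemorySessionStore(session_class=Session)
#     session = store.new()          # fresh session with a generated sid
#     store.save(session)            # kept in store.store, keyed by sid
#     assert store.get(session.sid) is session
#     store.delete(session)          # a later get() returns a new session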


# pylint: disable=W0223(abstract-method)
class HtmlTokenizer(HTMLParser):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.tokens = []

    @classmethod
    def _attrs_to_str(cls, attrs):
        # Render ``[('class', 'x'), ('hidden', None)]`` as ``class='x' hidden``.
        out = []
        for key, value in attrs:
            out.append(f"{key}={value!r}" if value else key)
        return " ".join(out)

    def handle_starttag(self, tag, attrs):
        self.tokens.append(f"<{tag} {self._attrs_to_str(attrs)}>")

    def handle_endtag(self, tag):
        self.tokens.append(f"</{tag}>")

    def handle_startendtag(self, tag, attrs):
        # HTML5 <img> instead of XHTML <img/>
        self.handle_starttag(tag, attrs)

    def handle_data(self, data):
        # Keep only non-whitespace text nodes.
        data = data.strip()
        if data:
            self.tokens.append(data)

    @classmethod
    def tokenize(cls, source_str):
        """
        Parse the source HTML into a list of tokens. Only tags and
        tag data are kept; other elements such as comments are
        discarded.
        """
        tokenizer = cls()
        tokenizer.feed(source_str)
        return tokenizer.tokens
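

# Minimal usage sketch (illustrative, not part of the original file): comments
# and insignificant whitespace are dropped, so two equivalent markup variants
# tokenize to the same list and can be compared token by token.
if __name__ == "__main__":
    tokens = HtmlTokenizer.tokenize('<p class="x">Hello <!-- hi --><br/>world</p>')
    print(tokens)
    # ["<p class='x'>", 'Hello', '<br >', 'world', '</p>']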