# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

"""
The PostgreSQL connector is a connectivity layer between the OpenERP code and
the database, *not* a database abstraction toolkit. Database abstraction is what
the ORM does, in fact.
"""

import logging
import os
import re
import threading
import time
import uuid
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from inspect import currentframe

import psycopg2
import psycopg2.extensions
import psycopg2.extras
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.pool import PoolError
from psycopg2.sql import SQL, Identifier
from werkzeug import urls

from . import tools
from .tools.func import frame_codeinfo, locked

psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)


def undecimalize(value, cr):
    if value is None:
        return None
    return float(value)


psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700), 'float', undecimalize))
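

# Illustrative sketch (not part of the original module): the cast registered
# above covers the PostgreSQL type OIDs 700 (float4), 701 (float8) and
# 1700 (numeric), so NUMERIC columns are fetched as Python float rather than
# decimal.Decimal.
def _example_float_cast(cr):
    # `cr` is assumed to be an open Cursor on any database
    cr.execute("SELECT 1.5::numeric")
    value = cr.fetchone()[0]
    assert isinstance(value, float)  # would be Decimal without the cast above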


_logger = logging.getLogger(__name__)
_logger_conn = _logger.getChild("connection")

real_time = time.time.__call__  # ensure we have a non-patched time for query times when using freezegun

re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$', re.MULTILINE | re.IGNORECASE)
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$', re.MULTILINE | re.IGNORECASE)

sql_counter = 0


class Savepoint:
    """ Reifies an active savepoint, allows :meth:`BaseCursor.savepoint` users
    to internally roll back the savepoint (as many times as they want) without
    having to implement their own savepointing, or triggering exceptions.

    Should normally be created using :meth:`BaseCursor.savepoint` rather than
    directly.

    The savepoint will be rolled back on unsuccessful context exits
    (exceptions). It will be released ("committed") on successful context exit.

    The savepoint object can be wrapped in ``contextlib.closing`` to
    unconditionally roll it back.

    The savepoint can also safely be explicitly closed during the context
    body; this rolls back by default.

    :param BaseCursor cr: the cursor to execute the `SAVEPOINT` queries on
    """
    def __init__(self, cr):
        self.name = str(uuid.uuid1())
        self._name = Identifier(self.name)
        self._cr = cr
        self.closed = False
        cr.execute(SQL('SAVEPOINT {}').format(self._name))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close(rollback=exc_type is not None)

    def close(self, *, rollback=True):
        if not self.closed:
            self._close(rollback)

    def rollback(self):
        self._cr.execute(SQL('ROLLBACK TO SAVEPOINT {}').format(self._name))

    def _close(self, rollback):
        if rollback:
            self.rollback()
        self._cr.execute(SQL('RELEASE SAVEPOINT {}').format(self._name))
        self.closed = True
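

# Usage sketch (illustrative, not part of the original module): a Savepoint is
# normally obtained via :meth:`BaseCursor.savepoint`; wrapping it in
# ``contextlib.closing`` rolls it back unconditionally on exit, as the class
# docstring above describes. The UPDATE query is hypothetical.
def _example_savepoint(cr):
    import contextlib
    with contextlib.closing(cr.savepoint(flush=False)) as sp:
        cr.execute("UPDATE res_partner SET active = false")  # hypothetical query
        sp.rollback()   # the savepoint can be rolled back any number of times
    # closing() calls sp.close(), which rolls back by default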


class _FlushingSavepoint(Savepoint):
    def __init__(self, cr):
        cr.flush()
        super().__init__(cr)

    def rollback(self):
        self._cr.clear()
        super().rollback()

    def _close(self, rollback):
        try:
            if not rollback:
                self._cr.flush()
        except Exception:
            rollback = True
            raise
        finally:
            super()._close(rollback)


class BaseCursor:
    """ Base class for cursors that manage pre/post commit hooks. """

    def __init__(self):
        self.precommit = tools.Callbacks()
        self.postcommit = tools.Callbacks()
        self.prerollback = tools.Callbacks()
        self.postrollback = tools.Callbacks()
        # By default a cursor has no transaction object. A transaction object
        # for managing environments is instantiated by registry.cursor(). It
        # is not done here in order to avoid cyclic module dependencies.
        self.transaction = None

    def flush(self):
        """ Flush the current transaction, and run precommit hooks. """
        if self.transaction is not None:
            self.transaction.flush()
        self.precommit.run()

    def clear(self):
        """ Clear the current transaction, and clear precommit hooks. """
        if self.transaction is not None:
            self.transaction.clear()
        self.precommit.clear()

    def reset(self):
        """ Reset the current transaction (this invalidates more than clear()).
        This method should be called only right after commit() or rollback().
        """
        if self.transaction is not None:
            self.transaction.reset()

    def savepoint(self, flush=True) -> Savepoint:
        """ Context manager entering a new savepoint.

        With ``flush`` (the default), will automatically run (or clear) the
        relevant hooks.
        """
        if flush:
            return _FlushingSavepoint(self)
        else:
            return Savepoint(self)

    def __enter__(self):
        """ Using the cursor as a contextmanager automatically commits and
        closes it::

            with cr:
                cr.execute(...)

            # cr is committed if no failure occurred
            # cr is closed in any case
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            if exc_type is None:
                self.commit()
        finally:
            self.close()
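

# Hook sketch (illustrative, not part of the original module): the four
# ``tools.Callbacks`` containers schedule work around transaction boundaries;
# ``flush()`` runs precommit hooks, and ``Cursor.commit()`` (below) runs
# postcommit hooks after a successful COMMIT. Assumes `cr` is a real Cursor.
def _example_hooks(cr):
    cr.precommit.add(lambda: _logger.debug("about to commit"))
    cr.postcommit.add(lambda: _logger.info("transaction committed"))
    cr.commit()  # runs precommit hooks (via flush), commits, then postcommit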


class Cursor(BaseCursor):
    """Represents an open transaction to the PostgreSQL DB backend,
    acting as a lightweight wrapper around psycopg2's
    ``cursor`` objects.

    ``Cursor`` is the object behind the ``cr`` variable used all
    over the OpenERP code.

    .. rubric:: Transaction Isolation

    One very important property of database transactions is the
    level of isolation between concurrent transactions.
    The SQL standard defines four levels of transaction isolation,
    ranging from the most strict *Serializable* level, to the least
    strict *Read Uncommitted* level. These levels are defined in
    terms of the phenomena that must not occur between concurrent
    transactions, such as *dirty read*, etc.

    In the context of a generic business data management software
    such as OpenERP, we need the best guarantees that no data
    corruption can ever be caused by simply running multiple
    transactions in parallel. Therefore, the preferred level would
    be the *serializable* level, which ensures that a set of
    transactions is guaranteed to produce the same effect as
    running them one at a time in some order.

    However, most database management systems implement a limited
    serializable isolation in the form of
    `snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
    providing most of the same advantages as True Serializability,
    with a fraction of the performance cost.
    With PostgreSQL up to version 9.0, this snapshot isolation was
    the implementation of both the ``REPEATABLE READ`` and
    ``SERIALIZABLE`` levels of the SQL standard.
    As of PostgreSQL 9.1, the previous snapshot isolation implementation
    was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
    level was introduced, providing some additional heuristics to
    detect a concurrent update by parallel transactions, and forcing
    one of them to rollback.

    OpenERP implements its own level of locking protection
    for transactions that are highly likely to provoke concurrent
    updates, such as stock reservations or document sequences updates.
    Therefore we mostly care about the properties of snapshot isolation,
    but we don't really need additional heuristics to trigger transaction
    rollbacks, as we are taking care of triggering instant rollbacks
    ourselves when it matters (and we can save the additional performance
    hit of these heuristics).

    As a result of the above, we have selected ``REPEATABLE READ`` as
    the default transaction isolation level for OpenERP cursors, as
    it will be mapped to the desired ``snapshot isolation`` level for
    all supported PostgreSQL versions (>10).

    .. attribute:: cache

        Cache dictionary with a "request" (-ish) lifecycle: it only lives as
        long as the cursor itself does, and is proactively cleared when the
        cursor is closed.

        This cache should *only* be used to store repeatable reads, as it
        ignores rollbacks and savepoints; it should not be used to store
        *any* data which may be modified during the life of the cursor.
    """
    IN_MAX = 1000   # decent limit on size of IN queries - guideline = Oracle limit

    def __init__(self, pool, dbname, dsn, **kwargs):
        super().__init__()
        if 'serialized' in kwargs:
            warnings.warn("Since 16.0, 'serialized' parameter is not used anymore.", DeprecationWarning, 2)
        assert kwargs.keys() <= {'serialized'}
        self.sql_from_log = {}
        self.sql_into_log = {}

        # default log level determined at cursor creation, could be
        # overridden later for debugging purposes
        self.sql_log_count = 0
        self._sql_table_tracking = False

        # avoid the call of close() (by __del__) if an exception
        # is raised by any of the following initializations
        self._closed = True

        self.__pool = pool
        self.dbname = dbname
        self._cnx = pool.borrow(dsn)
        self._obj = self._cnx.cursor()
        if _logger.isEnabledFor(logging.DEBUG):
            self.__caller = frame_codeinfo(currentframe(), 2)
        else:
            self.__caller = False
        self._closed = False  # real initialization value
        # See the docstring of this class.
        self.connection.set_isolation_level(ISOLATION_LEVEL_REPEATABLE_READ)

        self.cache = {}
        self._now = None

    def __build_dict(self, row):
        return {d.name: row[i] for i, d in enumerate(self._obj.description)}

    def dictfetchone(self):
        row = self._obj.fetchone()
        return row and self.__build_dict(row)

    def dictfetchmany(self, size):
        return [self.__build_dict(row) for row in self._obj.fetchmany(size)]

    def dictfetchall(self):
        return [self.__build_dict(row) for row in self._obj.fetchall()]
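
    # Result-shape sketch (illustrative, not part of the original module): the
    # dictfetch* helpers map column names from ``self._obj.description`` onto
    # each row, e.g. for a hypothetical query:
    #
    #     cr.execute("SELECT id, login FROM res_users")
    #     cr.dictfetchall()
    #     # -> [{'id': 1, 'login': 'admin'}, ...]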

    def __del__(self):
        if not self._closed and not self._cnx.closed:
            # Oops. 'self' has not been closed explicitly.
            # The cursor will be deleted by the garbage collector,
            # but the database connection is not put back into the connection
            # pool, preventing some operation on the database like dropping it.
            # This can also lead to a server overload.
            msg = "Cursor not closed explicitly\n"
            if self.__caller:
                msg += "Cursor was created at %s:%s" % self.__caller
            else:
                msg += "Please enable sql debugging to trace the caller."
            _logger.warning(msg)
            self._close(True)

    def _format(self, query, params=None):
        encoding = psycopg2.extensions.encodings[self.connection.encoding]
        return self._obj.mogrify(query, params).decode(encoding, 'replace')

    def execute(self, query, params=None, log_exceptions=True):
        global sql_counter

        if params and not isinstance(params, (tuple, list, dict)):
            # psycopg2's TypeError is not clear if you mess up the params
            raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))

        start = real_time()
        try:
            params = params or None
            res = self._obj.execute(query, params)
        except Exception as e:
            if log_exceptions:
                _logger.error("bad query: %s\nERROR: %s", tools.ustr(self._obj.query or query), e)
            raise
        finally:
            delay = real_time() - start
            if _logger.isEnabledFor(logging.DEBUG):
                _logger.debug("[%.3f ms] query: %s", 1000 * delay, self._format(query, params))

        # simple query count is always computed
        self.sql_log_count += 1
        sql_counter += 1

        current_thread = threading.current_thread()
        if hasattr(current_thread, 'query_count'):
            current_thread.query_count += 1
            current_thread.query_time += delay

        # optional hooks for performance and tracing analysis
        for hook in getattr(current_thread, 'query_hooks', ()):
            hook(self, query, params, start, delay)

        # advanced stats
        if _logger.isEnabledFor(logging.DEBUG) or self._sql_table_tracking:
            delay *= 1E6

            decoded_query = self._obj.query.decode()
            res_into = re_into.search(decoded_query)
            # prioritize `insert` over `select` so `select` subqueries are not
            # considered when inside an `insert`
            if res_into:
                self.sql_into_log.setdefault(res_into.group(1), [0, 0])
                self.sql_into_log[res_into.group(1)][0] += 1
                self.sql_into_log[res_into.group(1)][1] += delay
            else:
                res_from = re_from.search(decoded_query)
                if res_from:
                    self.sql_from_log.setdefault(res_from.group(1), [0, 0])
                    self.sql_from_log[res_from.group(1)][0] += 1
                    self.sql_from_log[res_from.group(1)][1] += delay
        return res
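
    # Parameter-passing sketch (illustrative, not part of the original
    # module): execute() insists on tuple/list/dict parameters so that values
    # are escaped by psycopg2 instead of being interpolated into the SQL
    # string:
    #
    #     cr.execute("SELECT id FROM res_users WHERE login = %s", ('admin',))  # OK
    #     cr.execute("... WHERE login = '%s'" % login)   # unsafe, never do this
    #     cr.execute("... WHERE login = %s", 'admin')    # raises ValueError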

    def split_for_in_conditions(self, ids, size=None):
        """Split a list of identifiers into one or more smaller tuples
        safe for IN conditions, after uniquifying them."""
        return tools.misc.split_every(size or self.IN_MAX, ids)
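
    # Chunking sketch (illustrative, not part of the original module): large
    # id collections are split into IN_MAX-sized tuples so each IN clause
    # stays small:
    #
    #     for chunk in cr.split_for_in_conditions(ids):
    #         cr.execute("SELECT name FROM res_partner WHERE id IN %s", (chunk,))
    #         names.extend(row[0] for row in cr.fetchall())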

    def print_log(self):
        global sql_counter

        if not _logger.isEnabledFor(logging.DEBUG):
            return

        def process(type):
            sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
            sum = 0
            if sqllogs[type]:
                sqllogitems = sqllogs[type].items()
                _logger.debug("SQL LOG %s:", type)
                for r in sorted(sqllogitems, key=lambda k: k[1]):
                    delay = timedelta(microseconds=r[1][1])
                    _logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
                    sum += r[1][1]
                sqllogs[type].clear()
            sum = timedelta(microseconds=sum)
            _logger.debug("SUM %s:%s/%d [%d]", type, sum, self.sql_log_count, sql_counter)
            sqllogs[type].clear()
        process('from')
        process('into')
        self.sql_log_count = 0

    @contextmanager
    def _enable_logging(self):
        """ Forcefully enables logging for this cursor, restores it afterwards.

        Updates the logger in-place, so not thread-safe.
        """
        level = _logger.level
        _logger.setLevel(logging.DEBUG)
        try:
            yield
        finally:
            _logger.setLevel(level)

    @contextmanager
    def _enable_table_tracking(self):
        try:
            old = self._sql_table_tracking
            self._sql_table_tracking = True
            yield
        finally:
            self._sql_table_tracking = old

    def close(self):
        if not self.closed:
            return self._close(False)

    def _close(self, leak=False):
        if not self._obj:
            return

        del self.cache

        # advanced stats only at logging.DEBUG level
        self.print_log()

        self._obj.close()

        # This forces the cursor to be freed, and thus, available again. It is
        # important because otherwise we can overload the server very easily,
        # because of a cursor shortage (cursors are not garbage collected as
        # fast as they should be). The problem is probably due in part to
        # browse records keeping a reference to the cursor.
        del self._obj

        # Clean the underlying connection, and run rollback hooks.
        self.rollback()

        self._closed = True

        if leak:
            self._cnx.leaked = True
        else:
            chosen_template = tools.config['db_template']
            keep_in_pool = self.dbname not in ('template0', 'template1', 'postgres', chosen_template)
            self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)

    def autocommit(self, on):
        warnings.warn(
            f"Deprecated method since 16.0, use {'`_cnx.autocommit = True`' if on else '`_cnx.set_isolation_level`'} instead.",
            DeprecationWarning, stacklevel=2
        )
        if on:
            isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
        else:
            # 'serialized' cursors are gone since 16.0; the default isolation
            # level is always REPEATABLE READ (see __init__)
            isolation_level = ISOLATION_LEVEL_REPEATABLE_READ
        self._cnx.set_isolation_level(isolation_level)

    def commit(self):
        """ Perform an SQL `COMMIT` """
        self.flush()
        result = self._cnx.commit()
        self.clear()
        self._now = None
        self.prerollback.clear()
        self.postrollback.clear()
        self.postcommit.run()
        return result

    def rollback(self):
        """ Perform an SQL `ROLLBACK` """
        self.clear()
        self.postcommit.clear()
        self.prerollback.run()
        result = self._cnx.rollback()
        self._now = None
        self.postrollback.run()
        return result

    def __getattr__(self, name):
        if self._closed and name == '_obj':
            raise psycopg2.InterfaceError("Cursor already closed")
        return getattr(self._obj, name)

    @property
    def closed(self):
        return self._closed or self._cnx.closed

    def now(self):
        """ Return the transaction's timestamp ``NOW() AT TIME ZONE 'UTC'``. """
        if self._now is None:
            self.execute("SELECT (now() AT TIME ZONE 'UTC')")
            self._now = self.fetchone()[0]
        return self._now
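

# Timestamp sketch (illustrative, not part of the original module): now() is
# queried once per transaction and cached until the next commit() or
# rollback(), so all writes in one transaction can share the same timestamp.
def _example_now(cr):
    first = cr.now()
    second = cr.now()   # no extra query, returns the cached value
    assert first == second
    cr.rollback()       # resets the cache; the next now() re-queries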


class TestCursor(BaseCursor):
    """ A pseudo-cursor to be used for tests, on top of a real cursor. It keeps
    the transaction open across requests, and simulates committing, rolling
    back, and closing:

    +------------------------+---------------------------------------------------+
    |  test cursor           | queries on actual cursor                          |
    +========================+===================================================+
    |``cr = TestCursor(...)``| SAVEPOINT test_cursor_N                           |
    +------------------------+---------------------------------------------------+
    | ``cr.execute(query)``  | query                                             |
    +------------------------+---------------------------------------------------+
    |  ``cr.commit()``       | RELEASE SAVEPOINT test_cursor_N                   |
    |                        | SAVEPOINT test_cursor_N (lazy)                    |
    +------------------------+---------------------------------------------------+
    |  ``cr.rollback()``     | ROLLBACK TO SAVEPOINT test_cursor_N (if savepoint)|
    +------------------------+---------------------------------------------------+
    |  ``cr.close()``        | ROLLBACK TO SAVEPOINT test_cursor_N (if savepoint)|
    |                        | RELEASE SAVEPOINT test_cursor_N (if savepoint)    |
    +------------------------+---------------------------------------------------+
    """
    _cursors_stack = []

    def __init__(self, cursor, lock):
        super().__init__()
        self._now = None
        self._closed = False
        self._cursor = cursor
        # we use a lock to serialize concurrent requests
        self._lock = lock
        self._lock.acquire()
        self._cursors_stack.append(self)
        # in order to simulate commit and rollback, the cursor maintains a
        # savepoint at its last commit; the savepoint is created lazily
        self._savepoint = self._cursor.savepoint(flush=False)

    def execute(self, *args, **kwargs):
        if not self._savepoint:
            self._savepoint = self._cursor.savepoint(flush=False)
        return self._cursor.execute(*args, **kwargs)

    def close(self):
        if not self._closed:
            self.rollback()
            self._closed = True
            if self._savepoint:
                self._savepoint.close(rollback=False)

            tos = self._cursors_stack.pop()
            if tos is not self:
                _logger.warning("Found different un-closed cursor when trying to close %s: %s", self, tos)
            self._lock.release()

    def autocommit(self, on):
        warnings.warn("Deprecated method and does nothing since 16.0", DeprecationWarning, 2)

    def commit(self):
        """ Perform an SQL `COMMIT` """
        self.flush()
        if self._savepoint:
            self._savepoint.close(rollback=False)
            self._savepoint = None
        self.clear()
        self.prerollback.clear()
        self.postrollback.clear()
        self.postcommit.clear()  # TestCursor ignores post-commit hooks

    def rollback(self):
        """ Perform an SQL `ROLLBACK` """
        self.clear()
        self.postcommit.clear()
        self.prerollback.run()
        if self._savepoint:
            self._savepoint.rollback()
        self.postrollback.run()

    def __getattr__(self, name):
        return getattr(self._cursor, name)

    def now(self):
        """ Return the transaction's timestamp ``datetime.now()``. """
        if self._now is None:
            self._now = datetime.now()
        return self._now


class PsycoConnection(psycopg2.extensions.connection):
    def lobject(*args, **kwargs):
        pass

    if hasattr(psycopg2.extensions, 'ConnectionInfo'):
        @property
        def info(self):
            class PsycoConnectionInfo(psycopg2.extensions.ConnectionInfo):
                @property
                def password(self):
                    pass
            return PsycoConnectionInfo(self)


class ConnectionPool(object):
    """ The pool of connections to database(s)

    Keep a set of connections to pg databases open, and reuse them
    to open cursors for all transactions.

    The connections are *not* automatically closed. Only a close_db()
    can trigger that.
    """
    def __init__(self, maxconn=64):
        self._connections = []
        self._maxconn = max(maxconn, 1)
        self._lock = threading.Lock()

    def __repr__(self):
        used = len([1 for c, u in self._connections[:] if u])
        count = len(self._connections)
        return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)

    def _debug(self, msg, *args):
        _logger_conn.debug(('%r ' + msg), self, *args)

    @locked
    def borrow(self, connection_info):
        """
        :param dict connection_info: dict of psql connection keywords
        :rtype: PsycoConnection
        """
        # free dead and leaked connections
        for i, (cnx, _) in tools.reverse_enumerate(self._connections):
            if cnx.closed:
                self._connections.pop(i)
                self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
                continue
            if getattr(cnx, 'leaked', False):
                delattr(cnx, 'leaked')
                self._connections.pop(i)
                self._connections.append((cnx, False))
                _logger.info('%r: Free leaked connection to %r', self, cnx.dsn)

        for i, (cnx, used) in enumerate(self._connections):
            if not used and self._dsn_equals(cnx.dsn, connection_info):
                try:
                    cnx.reset()
                except psycopg2.OperationalError:
                    self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
                    # psycopg2 2.4.4 and earlier do not allow closing a closed connection
                    if not cnx.closed:
                        cnx.close()
                    continue
                self._connections.pop(i)
                self._connections.append((cnx, True))
                self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)
                return cnx

        if len(self._connections) >= self._maxconn:
            # try to remove the oldest connection not used
            for i, (cnx, used) in enumerate(self._connections):
                if not used:
                    self._connections.pop(i)
                    if not cnx.closed:
                        cnx.close()
                    self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
                    break
            else:
                # note: this code is called only if the for loop has completed (no break)
                raise PoolError('The Connection Pool Is Full')

        try:
            result = psycopg2.connect(
                connection_factory=PsycoConnection,
                **connection_info)
        except psycopg2.Error:
            _logger.info('Connection to the database failed')
            raise
        self._connections.append((result, True))
        self._debug('Create new connection backend PID %d', result.get_backend_pid())
        return result

    @locked
    def give_back(self, connection, keep_in_pool=True):
        self._debug('Give back connection to %r', connection.dsn)
        for i, (cnx, used) in enumerate(self._connections):
            if cnx is connection:
                self._connections.pop(i)
                if keep_in_pool:
                    self._connections.append((cnx, False))
                    self._debug('Put connection to %r in pool', cnx.dsn)
                else:
                    self._debug('Forgot connection to %r', cnx.dsn)
                    cnx.close()
                break
        else:
            raise PoolError('This connection does not belong to the pool')

    @locked
    def close_all(self, dsn=None):
        count = 0
        last = None
        for i, (cnx, used) in tools.reverse_enumerate(self._connections):
            if dsn is None or self._dsn_equals(cnx.dsn, dsn):
                cnx.close()
                last = self._connections.pop(i)[0]
                count += 1
        _logger.info('%r: Closed %d connections %s', self, count,
                     (dsn and last and 'to %r' % last.dsn) or '')

    def _dsn_equals(self, dsn1, dsn2):
        alias_keys = {'dbname': 'database'}
        ignore_keys = ['password']
        dsn1, dsn2 = ({
            alias_keys.get(key, key): str(value)
            for key, value in (psycopg2.extensions.parse_dsn(dsn) if isinstance(dsn, str) else dsn).items()
            if key not in ignore_keys
        } for dsn in (dsn1, dsn2))
        return dsn1 == dsn2
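

# Pool lifecycle sketch (illustrative, not part of the original module):
# Cursor borrows and gives back connections itself, so direct use of the pool
# looks roughly like this.
def _example_pool(connection_info):
    pool = ConnectionPool(maxconn=2)
    cnx = pool.borrow(connection_info)   # reuses a free matching connection or opens one
    try:
        with cnx.cursor() as cur:
            cur.execute("SELECT 1")
        cnx.rollback()
    finally:
        pool.give_back(cnx)              # returned to the pool, marked unused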


class Connection(object):
    """ A lightweight instance of a connection to postgres
    """
    def __init__(self, pool, dbname, dsn):
        self.__dbname = dbname
        self.__dsn = dsn
        self.__pool = pool

    @property
    def dsn(self):
        dsn = dict(self.__dsn)
        dsn.pop('password', None)
        return dsn

    @property
    def dbname(self):
        return self.__dbname

    def cursor(self, **kwargs):
        if 'serialized' in kwargs:
            warnings.warn("Since 16.0, 'serialized' parameter is deprecated", DeprecationWarning, 2)
        cursor_type = kwargs.pop('serialized', True) and 'serialized ' or ''
        _logger.debug('create %scursor to %r', cursor_type, self.dsn)
        return Cursor(self.__pool, self.__dbname, self.__dsn)

    def serialized_cursor(self, **kwargs):
        warnings.warn("Since 16.0, 'serialized_cursor' is deprecated, use `cursor` instead", DeprecationWarning, 2)
        return self.cursor(**kwargs)

    def __bool__(self):
        raise NotImplementedError()
    __nonzero__ = __bool__


def connection_info_for(db_or_uri):
    """ Parse the given `db_or_uri` and return a 2-tuple (dbname, connection_params).

    Connection params are either a dictionary with a single key ``dsn``
    containing a connection URI, or a dictionary containing connection
    parameter keywords which psycopg2 can build a key/value connection string
    (dsn) from.

    :param str db_or_uri: database name or postgres dsn
    :rtype: (str, dict)
    """
    if 'ODOO_PGAPPNAME' in os.environ:
        # Using manual string interpolation for security reasons and trimming at default NAMEDATALEN=63
        app_name = os.environ['ODOO_PGAPPNAME'].replace('{pid}', str(os.getpid()))[0:63]
    else:
        app_name = "odoo-%d" % os.getpid()
    if db_or_uri.startswith(('postgresql://', 'postgres://')):
        # extract db from uri
        us = urls.url_parse(db_or_uri)
        if len(us.path) > 1:
            db_name = us.path[1:]
        elif us.username:
            db_name = us.username
        else:
            db_name = us.hostname
        return db_name, {'dsn': db_or_uri, 'application_name': app_name}

    connection_info = {'database': db_or_uri, 'application_name': app_name}
    for p in ('host', 'port', 'user', 'password', 'sslmode'):
        cfg = tools.config['db_' + p]
        if cfg:
            connection_info[p] = cfg

    return db_or_uri, connection_info
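

# Return-value sketch (illustrative, not part of the original module); extra
# keys depend on the db_* entries set in tools.config:
#
#     connection_info_for('mydb')
#     # -> ('mydb', {'database': 'mydb', 'application_name': 'odoo-<pid>', ...})
#
#     connection_info_for('postgresql://user@host/mydb')
#     # -> ('mydb', {'dsn': 'postgresql://user@host/mydb',
#     #              'application_name': 'odoo-<pid>'})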


_Pool = None


def db_connect(to, allow_uri=False):
    global _Pool
    if _Pool is None:
        _Pool = ConnectionPool(int(tools.config['db_maxconn']))

    db, info = connection_info_for(to)
    if not allow_uri and db != to:
        raise ValueError('URI connections not allowed')
    return Connection(_Pool, db, info)
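

# Entry-point sketch (illustrative, not part of the original module): this is
# the usual way the rest of the codebase obtains a cursor.
def _example_db_connect(dbname):
    connection = db_connect(dbname)
    with connection.cursor() as cr:   # commits on success, closes either way
        cr.execute("SELECT current_database()")
        return cr.fetchone()[0]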


def close_db(db_name):
    """ You might want to call odoo.modules.registry.Registry.delete(db_name) along with this function. """
    global _Pool
    if _Pool:
        _Pool.close_all(connection_info_for(db_name)[1])


def close_all():
    global _Pool
    if _Pool:
        _Pool.close_all()