# WARNING: this file is auto-generated by 'async_to_sync.py'
# from the original file 'cursor_async.py'
# DO NOT CHANGE! Change the original file instead.

"""
Psycopg Cursor object.
"""

# Copyright (C) 2020 The Psycopg Team

from __future__ import annotations

from types import TracebackType
from typing import TYPE_CHECKING, Any, overload
from contextlib import contextmanager
from collections.abc import Iterable, Iterator

from . import errors as e
from . import pq
from .abc import Params, Query, QueryNoTemplate
from .copy import Copy, Writer
from .rows import Row, RowFactory, RowMaker
from ._compat import Self, Template
from ._pipeline import Pipeline
from ._cursor_base import BaseCursor

if TYPE_CHECKING:
    from .connection import Connection

ACTIVE = pq.TransactionStatus.ACTIVE


class Cursor(BaseCursor["Connection[Any]", Row]):
    """
    Synchronous cursor to execute queries and fetch results on a
    `Connection`.

    Rows are returned in the shape produced by the cursor's `row_factory`
    (by default inherited from the connection). The cursor is also a
    context manager: leaving the ``with`` block closes it.
    """

    __module__ = "psycopg"
    __slots__ = ()

    @overload
    def __init__(self, connection: Connection[Row]): ...

    @overload
    def __init__(
        self, connection: Connection[Any], *, row_factory: RowFactory[Row]
    ): ...

    def __init__(
        self, connection: Connection[Any], *, row_factory: RowFactory[Row] | None = None
    ):
        """
        :param connection: the connection the cursor operates on.
        :param row_factory: if specified, override the connection's row
            factory for the rows returned by this cursor.
        """
        super().__init__(connection)
        # Fall back on the connection's row factory when none is given.
        self._row_factory = row_factory or connection.row_factory

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        # Close the cursor on exiting the `with` block, regardless of errors.
        self.close()

    def close(self) -> None:
        """
        Close the current cursor and free associated resources.
        """
        self._close()

    @property
    def row_factory(self) -> RowFactory[Row]:
        """Writable attribute to control how result rows are formed."""
        return self._row_factory

    @row_factory.setter
    def row_factory(self, row_factory: RowFactory[Row]) -> None:
        self._row_factory = row_factory
        # If a result is already available, rebuild the row maker so that
        # subsequent fetches use the new factory immediately.
        if self.pgresult:
            self._make_row = row_factory(self)

    def _make_row_maker(self) -> RowMaker[Row]:
        # Hook used by the base class to obtain the row maker for a result.
        return self._row_factory(self)

    @overload
    def execute(
        self,
        query: QueryNoTemplate,
        params: Params | None = None,
        *,
        prepare: bool | None = None,
        binary: bool | None = None,
    ) -> Self: ...

    @overload
    def execute(
        self,
        query: Template,
        *,
        prepare: bool | None = None,
        binary: bool | None = None,
    ) -> Self: ...

    def execute(
        self,
        query: Query,
        params: Params | None = None,
        *,
        prepare: bool | None = None,
        binary: bool | None = None,
    ) -> Self:
        """
        Execute a query or command to the database.

        :param query: the query to execute.
        :param params: the parameters to bind to the query placeholders.
        :param prepare: force (`!True`) or disable (`!False`) the use of a
            prepared statement; `!None` leaves the decision to the driver.
        :param binary: request results in binary format if `!True`.
        :return: the cursor itself, so that fetch methods can be chained.
        """
        try:
            with self._conn.lock:
                self._conn.wait(
                    self._execute_gen(query, params, prepare=prepare, binary=binary)
                )
        except e._NO_TRACEBACK as ex:
            # Drop the internal traceback to present a cleaner error.
            raise ex.with_traceback(None)
        return self

    def executemany(
        self, query: Query, params_seq: Iterable[Params], *, returning: bool = False
    ) -> None:
        """
        Execute the same command with a sequence of input data.

        :param query: the query to execute for each parameters set.
        :param params_seq: an iterable of parameters to bind, one set per
            execution.
        :param returning: if `!True`, fetch the results of the queries
            (available afterwards via the fetch methods / `nextset()`).
        """
        try:
            with self._conn.lock:
                if Pipeline.is_supported():
                    # If there is already a pipeline, ride it, in order to avoid
                    # sending unnecessary Sync.
                    if self._conn._pipeline:
                        self._conn.wait(
                            self._executemany_gen_pipeline(query, params_seq, returning)
                        )
                    else:
                        # Otherwise, make a new one
                        with self._conn._pipeline_nolock():
                            self._conn.wait(
                                self._executemany_gen_pipeline(
                                    query, params_seq, returning
                                )
                            )
                else:
                    # Libpq without pipeline support: plain round-trips.
                    self._conn.wait(
                        self._executemany_gen_no_pipeline(query, params_seq, returning)
                    )
        except e._NO_TRACEBACK as ex:
            raise ex.with_traceback(None)

    def stream(
        self,
        query: Query,
        params: Params | None = None,
        *,
        binary: bool | None = None,
        size: int = 1,
    ) -> Iterator[Row]:
        """
        Iterate row-by-row on a result from the database.

        :param size: if greater than 1, results will be retrieved by chunks of
            this size from the server (but still yielded row-by-row); this is
            only available from version 17 of the libpq.
        """
        if self._pgconn.pipeline_status:
            raise e.ProgrammingError("stream() cannot be used in pipeline mode")

        with self._conn.lock:
            try:
                self._conn.wait(
                    self._stream_send_gen(query, params, binary=binary, size=size)
                )
                first = True
                # Each wait returns the next chunk of rows (or None at end).
                while res := self._conn.wait(self._stream_fetchone_gen(first)):
                    for pos in range(res.ntuples):
                        yield self._tx.load_row(pos, self._make_row)
                    first = False
            except e._NO_TRACEBACK as ex:
                raise ex.with_traceback(None)
            finally:
                # The generator may be left mid-stream (break, GC, error):
                # bring the connection back to an usable state if needed.
                if self._pgconn.transaction_status == ACTIVE:
                    # Try to cancel the query, then consume the results
                    # already received.
                    self._conn._try_cancel()
                    try:
                        while self._conn.wait(self._stream_fetchone_gen(first=False)):
                            pass
                    except Exception:
                        pass

                    # Try to get out of ACTIVE state. Just do a single attempt, which
                    # should work to recover from an error or query cancelled.
                    try:
                        self._conn.wait(self._stream_fetchone_gen(first=False))
                    except Exception:
                        pass

    def results(self) -> Iterator[Self]:
        """
        Iterate across multiple record sets received by the cursor.

        Multiple record sets are received after using `executemany()` with
        `!returning=True` or using `execute()` with more than one query in
        the command.
        """
        if self.pgresult:
            while True:
                yield self
                if not self.nextset():
                    break

    def set_result(self, index: int) -> Self:
        """
        Move to a specific result set.

        :arg index: index of the result to go to
        :type index: `!int`

        More than one result will be available after calling
        `executemany()` or `execute()` with more than one query.

        `!index` is 0-based and supports negative values, counting from the
        end, the same way you can index items in a list.

        The function returns self, so that the result may be followed by a
        fetch operation.

        See `results()` for details.
        """
        if not -len(self._results) <= index < len(self._results):
            raise IndexError(
                f"index {index} out of range: {len(self._results)} result(s) available"
            )
        if index < 0:
            # Normalize negative indexes, list-style.
            index = len(self._results) + index

        self._select_current_result(index)
        return self

    def fetchone(self) -> Row | None:
        """
        Return the next record from the current result set.

        Return `!None` if the result set is finished.

        :rtype: Row | None, with Row defined by `row_factory`
        """
        self._fetch_pipeline()
        res = self._check_result_for_fetch()
        if self._pos < res.ntuples:
            record = self._tx.load_row(self._pos, self._make_row)
            self._pos += 1
            return record
        return None

    def fetchmany(self, size: int = 0) -> list[Row]:
        """
        Return the next `!size` records from the current result set.

        `!size` default to `!self.arraysize` if not specified.

        :rtype: Sequence[Row], with Row defined by `row_factory`
        """
        self._fetch_pipeline()
        res = self._check_result_for_fetch()
        if not size:
            size = self.arraysize
        # Clamp the slice to the number of tuples available.
        records = self._tx.load_rows(
            self._pos, min(self._pos + size, res.ntuples), self._make_row
        )
        self._pos += len(records)
        return records

    def fetchall(self) -> list[Row]:
        """
        Return all the remaining records from the current result set.

        :rtype: Sequence[Row], with Row defined by `row_factory`
        """
        self._fetch_pipeline()
        res = self._check_result_for_fetch()
        records = self._tx.load_rows(self._pos, res.ntuples, self._make_row)
        self._pos = res.ntuples
        return records

    def __iter__(self) -> Self:
        return self

    def __next__(self) -> Row:
        # Same logic as fetchone(), but raising StopIteration at the end so
        # the cursor can be used directly in a `for` loop.
        self._fetch_pipeline()
        res = self._check_result_for_fetch()
        if self._pos < res.ntuples:
            record = self._tx.load_row(self._pos, self._make_row)
            self._pos += 1
            return record
        raise StopIteration("no more records to return")

    def scroll(self, value: int, mode: str = "relative") -> None:
        """
        Move the cursor in the result set to a new position according to mode.

        If `!mode` is ``'relative'`` (default), `!value` is taken as offset to
        the current position in the result set; if set to ``'absolute'``,
        `!value` states an absolute target position.

        Raise `!IndexError` in case a scroll operation would leave the result
        set. In this case the position will not change.
        """
        self._fetch_pipeline()
        self._scroll(value, mode)

    @contextmanager
    def copy(
        self,
        statement: Query,
        params: Params | None = None,
        *,
        writer: Writer | None = None,
    ) -> Iterator[Copy]:
        """
        Initiate a :sql:`COPY` operation and return an object to manage it.

        :param statement: the :sql:`COPY` statement to execute.
        :param params: the parameters to bind to the statement placeholders.
        :param writer: if specified, the object consuming the copied data
            (instead of the connection itself).
        """
        try:
            with self._conn.lock:
                self._conn.wait(self._start_copy_gen(statement, params))

            with Copy(self, writer=writer) as copy:
                yield copy
        except e._NO_TRACEBACK as ex:
            raise ex.with_traceback(None)

        # If a fresher result has been set on the cursor by the Copy object,
        # read its properties (especially rowcount).
        self._select_current_result(0)

    def _fetch_pipeline(self) -> None:
        # In pipeline mode results may not have been fetched yet when a fetch
        # method is called: flush the pipeline so a result is available.
        if (
            self._execmany_returning is not False
            and (not self.pgresult)
            and self._conn._pipeline
        ):
            with self._conn.lock:
                self._conn.wait(self._conn._pipeline._fetch_gen(flush=True))