"""CommandCursor class to iterate over command results."""
from __future__ import annotations

from collections import deque
from typing import (
    TYPE_CHECKING,
    Any,
    Generic,
    Iterator,
    Mapping,
    NoReturn,
    Optional,
    Sequence,
    Union,
)

from bson import CodecOptions, _convert_raw_document_lists_to_streams
from pymongo import _csot
from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS
from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure
from pymongo.message import (
    _CursorAddress,
    _GetMore,
    _OpMsg,
    _OpReply,
    _RawBatchGetMore,
)
from pymongo.response import PinnedResponse
from pymongo.synchronous.cursor import _ConnectionManager
from pymongo.typings import _Address, _DocumentOut, _DocumentType

if TYPE_CHECKING:
    from pymongo.synchronous.client_session import ClientSession
    from pymongo.synchronous.collection import Collection
    from pymongo.synchronous.pool import Connection

_IS_SYNC = True


class CommandCursor(Generic[_DocumentType]):
    """A cursor / iterator over command cursors."""

    _getmore_class = _GetMore

    def __init__(
        self,
        collection: Collection[_DocumentType],
        cursor_info: Mapping[str, Any],
        address: Optional[_Address],
        batch_size: int = 0,
        max_await_time_ms: Optional[int] = None,
        session: Optional[ClientSession] = None,
        explicit_session: bool = False,
        comment: Any = None,
    ) -> None:
        """Create a new command cursor."""
        self._sock_mgr: Any = None
        self._collection: Collection[_DocumentType] = collection
        self._id = cursor_info["id"]
        self._data = deque(cursor_info["firstBatch"])
        self._postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get(
            "postBatchResumeToken"
        )
        self._address = address
        self._batch_size = batch_size
        self._max_await_time_ms = max_await_time_ms
        self._timeout = self._collection.database.client.options.timeout
        self._session = session
        self._explicit_session = explicit_session
        self._killed = self._id == 0
        self._comment = comment
        if self._killed:
            self._end_session()

        if "ns" in cursor_info:
            self._ns = cursor_info["ns"]
        else:
            self._ns = collection.full_name

        self.batch_size(batch_size)

        if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None:
            raise TypeError(
                f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}"
            )

    def __del__(self) -> None:
        self._die_no_lock()

    def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]:
        """Limits the number of documents returned in one batch. Each batch
        requires a round trip to the server. It can be adjusted to optimize
        performance and limit data transfer.

        .. note:: batch_size can not override MongoDB's internal limits on the
           amount of data it will return to the client in a single batch (i.e
           if you set batch size to 1,000,000,000, MongoDB will currently only
           return 4-16MB of results per batch).

        Raises :exc:`TypeError` if `batch_size` is not an integer.
        Raises :exc:`ValueError` if `batch_size` is less than ``0``.

        :param batch_size: The size of each batch of results requested.
        """
        if not isinstance(batch_size, int):
            raise TypeError(f"batch_size must be an integer, not {type(batch_size)}")
        if batch_size < 0:
            raise ValueError("batch_size must be >= 0")

        self._batch_size = batch_size == 1 and 2 or batch_size
        return self

    def _has_next(self) -> bool:
        """Returns `True` if the cursor has documents remaining from the
        previous batch.
        """
        return len(self._data) > 0

    @property
    def _post_batch_resume_token(self) -> Optional[Mapping[str, Any]]:
        """Retrieve the postBatchResumeToken from the response to a
        changeStream aggregate or getMore.
        """
        return self._postbatchresumetoken

    def _maybe_pin_connection(self, conn: Connection) -> None:
        client = self._collection.database.client
        if not client._should_pin_cursor(self._session):
            return
        if not self._sock_mgr:
            conn.pin_cursor()
            conn_mgr = _ConnectionManager(conn, False)
            # Ensure the connection gets returned when the entire result is
            # returned in the first batch.
            if self._id == 0:
                conn_mgr.close()
            else:
                self._sock_mgr = conn_mgr

    def _unpack_response(
        self,
        response: Union[_OpReply, _OpMsg],
        cursor_id: Optional[int],
        codec_options: CodecOptions[Mapping[str, Any]],
        user_fields: Optional[Mapping[str, Any]] = None,
        legacy_response: bool = False,
    ) -> Sequence[_DocumentOut]:
        return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response)

    @property
    def alive(self) -> bool:
        """Does this cursor have the potential to return more data?

        Even if :attr:`alive` is ``True``, :meth:`next` can raise
        :exc:`StopIteration`. Best to use a for loop::

            for doc in collection.aggregate(pipeline):
                print(doc)

        .. note:: :attr:`alive` can be True while iterating a cursor from
          a failed server. In this case :attr:`alive` will return False after
          :meth:`next` fails to retrieve the next batch of results from the
          server.
        """
        return bool(len(self._data) or (not self._killed))

    @property
    def cursor_id(self) -> int:
        """Returns the id of the cursor."""
        return self._id

    @property
    def address(self) -> Optional[_Address]:
        """The (host, port) of the server used, or None.

        .. versionadded:: 3.0
        """
        return self._address

    @property
    def session(self) -> Optional[ClientSession]:
        """The cursor's :class:`~pymongo.client_session.ClientSession`, or None.

        .. versionadded:: 3.6
        """
        if self._explicit_session:
            return self._session
        return None

    def _prepare_to_die(self) -> tuple[int, Optional[_CursorAddress]]:
        already_killed = self._killed
        self._killed = True
        if self._id and not already_killed:
            cursor_id = self._id
            assert self._address is not None
            address = _CursorAddress(self._address, self._ns)
        else:
            # Skip killCursors.
            cursor_id = 0
            address = None
        return cursor_id, address

    def _die_no_lock(self) -> None:
        """Closes this cursor without acquiring a lock."""
        cursor_id, address = self._prepare_to_die()
        self._collection.database.client._cleanup_cursor_no_lock(
            cursor_id, address, self._sock_mgr, self._session, self._explicit_session
        )
        if not self._explicit_session:
            self._session = None
        self._sock_mgr = None

    def _die_lock(self) -> None:
        """Closes this cursor."""
        cursor_id, address = self._prepare_to_die()
        self._collection.database.client._cleanup_cursor_lock(
            cursor_id, address, self._sock_mgr, self._session, self._explicit_session
        )
        if not self._explicit_session:
            self._session = None
        self._sock_mgr = None

    def _end_session(self) -> None:
        if self._session and not self._explicit_session:
            self._session._end_implicit_session()
            self._session = None

    def close(self) -> None:
        """Explicitly close / kill this cursor."""
        self._die_lock()

    def _send_message(self, operation: _GetMore) -> None:
        """Send a getmore message and handle the response."""
        client = self._collection.database.client
        try:
            response = client._run_operation(
                operation, self._unpack_response, address=self._address
            )
        except OperationFailure as exc:
            if exc.code in _CURSOR_CLOSED_ERRORS:
                # Don't send killCursors because the cursor is already closed.
                self._killed = True
            if exc.timeout:
                self._die_no_lock()
            else:
                self.close()
            raise
        except ConnectionFailure:
            # Don't send killCursors because the cursor is already closed.
            self._killed = True
            self.close()
            raise
        except Exception:
            self.close()
            raise

        if isinstance(response, PinnedResponse):
            if not self._sock_mgr:
                self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come)
        if response.from_command:
            cursor = response.docs[0]["cursor"]
            documents = cursor["nextBatch"]
            self._postbatchresumetoken = cursor.get("postBatchResumeToken")
            self._id = cursor["id"]
        else:
            documents = response.docs
            assert isinstance(response.data, _OpReply)
            self._id = response.data.cursor_id

        if self._id == 0:
            self.close()
        self._data = deque(documents)

    def _refresh(self) -> int:
        """Refreshes the cursor with more data from the server.

        Returns the length of self._data after refresh.  Will exit early if
        self._data is already non-empty.  Raises OperationFailure when the
        cursor cannot be refreshed due to an error on the query.
        """
        if len(self._data) or self._killed:
            return len(self._data)

        if self._id:  # Get More
            dbname, collname = self._ns.split(".", 1)
            read_pref = self._collection._read_preference_for(self.session)
            self._send_message(
                self._getmore_class(
                    dbname,
                    collname,
                    self._batch_size,
                    self._id,
                    self._collection.codec_options,
                    read_pref,
                    self._session,
                    self._collection.database.client,
                    self._max_await_time_ms,
                    self._sock_mgr,
                    False,
                    self._comment,
                )
            )
        else:  # Cursor id is zero, nothing else to return.
            self._die_lock()

        return len(self._data)

    def __iter__(self) -> Iterator[_DocumentType]:
        return self

    def next(self) -> _DocumentType:
        """Advance the cursor."""
        # Block until a document is returnable.
        while self.alive:
            doc = self._try_next(True)
            if doc is not None:
                return doc

        raise StopIteration

    def __next__(self) -> _DocumentType:
        return self.next()

    def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]:
        """Advance the cursor blocking for at most one getMore command."""
        if not len(self._data) and not self._killed and get_more_allowed:
            self._refresh()
        if len(self._data):
            return self._data.popleft()
        else:
            return None

    def _next_batch(self, result: list, total: Optional[int] = None) -> bool:
        """Get all or some available documents from the cursor."""
        if not len(self._data) and not self._killed:
            self._refresh()
        if len(self._data):
            if total is None:
                result.extend(self._data)
                self._data.clear()
            else:
                for _ in range(min(len(self._data), total)):
                    result.append(self._data.popleft())
            return True
        else:
            return False

    def __enter__(self) -> CommandCursor[_DocumentType]:
        return self

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        self.close()

    @_csot.apply
    def to_list(self, length: Optional[int] = None) -> list[_DocumentType]:
        """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``.

        To use::

          >>> cursor.to_list()

        Or, to read at most n items from the cursor::

          >>> cursor.to_list(n)

        If the cursor is empty or has no more results, an empty list will be
        returned.

        .. versionadded:: 4.9
        """
        res: list[_DocumentType] = []
        remaining = length
        if isinstance(length, int) and length < 1:
            raise ValueError("to_list() length must be greater than 0")
        while self.alive:
            if not self._next_batch(res, remaining):
                break
            if length is not None:
                remaining = length - len(res)
                if remaining == 0:
                    break
        return res


class RawBatchCommandCursor(CommandCursor[_DocumentType]):
    _getmore_class = _RawBatchGetMore

    def __init__(
        self,
        collection: Collection[_DocumentType],
        cursor_info: Mapping[str, Any],
        address: Optional[_Address],
        batch_size: int = 0,
        max_await_time_ms: Optional[int] = None,
        session: Optional[ClientSession] = None,
        explicit_session: bool = False,
        comment: Any = None,
    ) -> None:
        """Create a new cursor / iterator over raw batches of BSON data.

        Should not be called directly by application developers -
        see :meth:`~pymongo.collection.Collection.aggregate_raw_batches`
        instead.

        .. seealso:: The MongoDB documentation on `cursors `_.
        """
        assert not cursor_info.get("firstBatch")
        super().__init__(
            collection,
            cursor_info,
            address,
            batch_size,
            max_await_time_ms,
            session,
            explicit_session,
            comment,
        )

    def _unpack_response(
        self,
        response: Union[_OpReply, _OpMsg],
        cursor_id: Optional[int],
        codec_options: CodecOptions,
        user_fields: Optional[Mapping[str, Any]] = None,
        legacy_response: bool = False,
    ) -> list[Mapping[str, Any]]:
        raw_response = response.raw_response(cursor_id, user_fields=user_fields)
        if not legacy_response:
            # OP_MSG returns firstBatch/nextBatch documents as a BSON array;
            # re-assemble the array of documents into a document stream.
            _convert_raw_document_lists_to_streams(raw_response[0])
        return raw_response

    def __getitem__(self, index: int) -> NoReturn:
        raise InvalidOperation("Cannot call __getitem__ on RawBatchCommandCursor")
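

# A minimal usage sketch. The connection string, database, and collection names
# below are illustrative placeholders, not part of this module: application code
# obtains a CommandCursor from helpers such as Collection.aggregate and consumes
# it by iteration or to_list() rather than constructing it directly.
if __name__ == "__main__":
    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost:27017")
    coll = client["test"]["example"]

    # Iterate lazily; batch_size() caps how many documents each getMore returns.
    cursor = coll.aggregate([{"$match": {}}]).batch_size(100)
    for doc in cursor:
        print(doc)

    # Or drain an entire result set into a list in a single call.
    print(coll.aggregate([{"$count": "n"}]).to_list())
    client.close()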