import contextlib

import paddle.distributed as dist
from paddle import framework
from paddle.distributed.communication.group import (
    _get_global_group,
    _warn_cur_rank_not_in_group,
)


class P2POp:
    """
    A class that describes a point-to-point operation for ``batch_isend_irecv``.

    This class holds the type of P2P operation, the communication buffer, the peer
    rank, and the Group. Instances of this class will be passed to
    ``paddle.distributed.batch_isend_irecv`` for point-to-point communication.

    Args:
        op (callable): A function to send data to or receive data from a peer process.
            The type of ``op`` is either ``paddle.distributed.isend`` or ``paddle.distributed.irecv``.
        tensor (Tensor): Tensor to send or receive.
        peer (int): The destination or source rank.
        group (Group, optional): The group instance returned by ``new_group`` or
            None for the global default group. Default: None.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)

            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> rank = dist.get_rank()
            >>> world_size = dist.get_world_size()

            >>> send_t = paddle.arange(2) + rank
            >>> # paddle.tensor([0, 1])  # Rank-0
            >>> # paddle.tensor([1, 2])  # Rank-1

            >>> recv_t = paddle.empty(shape=[2], dtype=send_t.dtype)

            >>> send_op = dist.P2POp(dist.isend, send_t, (rank + 1) % world_size)
            >>> recv_op = dist.P2POp(dist.irecv, recv_t, (rank - 1 + world_size) % world_size)
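
            >>> # The P2POp objects only describe the communication; they are
            >>> # carried out later by ``dist.batch_isend_irecv([send_op, recv_op])``
            >>> # (see the example under ``batch_isend_irecv``).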

    """

    def __init__(self, op, tensor, peer, group=None):
        if op not in [dist.isend, dist.irecv]:
            raise RuntimeError(
                "Invalid ``op`` function. Expected ``op`` to be of type "
                "``paddle.distributed.isend`` or ``paddle.distributed.irecv``."
            )

        self.op = op
        self.tensor = tensor
        self.peer = peer
        self.group = _get_global_group() if group is None else group
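

# NCCL-only batching guard: for the "NCCL" backend, ProcessGroupNCCL.group_start()
# and ProcessGroupNCCL.group_end() bracket the isend/irecv calls issued inside the
# ``with`` block so the backend can launch them as one grouped batch.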
@contextlib.contextmanager
def _with_batch_p2p_guard(backend):
    if backend == "NCCL":
        framework.core.ProcessGroupNCCL.group_start()
    try:
        yield
    finally:
        if backend == "NCCL":
            framework.core.ProcessGroupNCCL.group_end()


def _check_p2p_op_list(p2p_op_list):
    """
    Helper to check that the ``p2p_op_list`` is a list of P2POp instances and
    all ops use the same backend.
    """
    if not isinstance(p2p_op_list, list) or not all(
        isinstance(p2p_op, P2POp) for p2p_op in p2p_op_list
    ):
        raise RuntimeError(
            "Invalid ``p2p_op_list``. Each op is expected to be of type "
            "``paddle.distributed.P2POp``."
        )

    backend = p2p_op_list[0].group.backend
    if not all(backend == p2p_op.group.backend for p2p_op in p2p_op_list):
        raise RuntimeError("All groups need to use the same backend.")


def batch_isend_irecv(p2p_op_list):
    """
    Send or receive a batch of tensors asynchronously and return a list of requests.

    Process each of the point-to-point operations in ``p2p_op_list`` and return the
    corresponding tasks. Only the NCCL backend is currently supported.

    Args:
        p2p_op_list (List[P2POp]): A list of point-to-point operations (each of type
            ``paddle.distributed.P2POp``). The order of the isend/irecv ops in the
            list matters and needs to match the corresponding isend/irecv ops on
            the remote end.

    Returns:
        A list of distributed tasks returned by calling the corresponding
        op in ``p2p_op_list``.

    Warning:
        This API only supports the dygraph mode.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)

            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> rank = dist.get_rank()
            >>> world_size = dist.get_world_size()

            >>> send_t = paddle.arange(2) + rank
            >>> # paddle.tensor([0, 1])  # Rank-0
            >>> # paddle.tensor([1, 2])  # Rank-1

            >>> recv_t = paddle.empty(shape=[2], dtype=send_t.dtype)

            >>> send_op = dist.P2POp(dist.isend, send_t, (rank + 1) % world_size)
            >>> recv_op = dist.P2POp(dist.irecv, recv_t, (rank - 1 + world_size) % world_size)

            >>> tasks = dist.batch_isend_irecv([send_op, recv_op])

            >>> for task in tasks:
            ...     task.wait()

            >>> print(recv_t)
            >>> # paddle.tensor([1, 2])     # Rank-0
            >>> # paddle.tensor([0, 1])     # Rank-1
    """
    _check_p2p_op_list(p2p_op_list)
    group = p2p_op_list[0].group
    if _warn_cur_rank_not_in_group(group):
        return

    if framework.in_dynamic_mode():
        group = _get_global_group() if group is None else group
        backend = group.backend
        tasks = []
        # Issue every isend/irecv inside the guard so the NCCL backend can
        # batch them into a single grouped launch.
        with _with_batch_p2p_guard(backend):
            for p2p_op in p2p_op_list:
                op = p2p_op.op
                tensor = p2p_op.tensor
                peer = p2p_op.peer
                comm_group = p2p_op.group
                task = op(tensor, peer, comm_group)
                if task is not None:
                    tasks.append(task)
        return tasks
    else:
        raise RuntimeError("Don't support static graph mode currently.")