from paddle.distributed.communication import stream


def alltoall(in_tensor_list, out_tensor_list, group=None, sync_op=True):
    """
    Scatter the tensors in ``in_tensor_list`` evenly across all participants and gather the results into ``out_tensor_list``.
    As shown below, ``in_tensor_list`` on GPU0 contains 0_0 and 0_1, while on GPU1 it contains 1_0 and 1_1.
    Through the alltoall operator, 0_0 stays on GPU0 and 0_1 is sent to GPU1; likewise, 1_0 is sent to GPU0 and 1_1 stays on GPU1.
    Finally, ``out_tensor_list`` on GPU0 contains 0_0 and 1_0, and on GPU1 it contains 0_1 and 1_1.

    .. image:: https://githubraw.cdn.bcebos.com/PaddlePaddle/docs/develop/docs/api/paddle/distributed/img/alltoall.png
        :width: 800
        :alt: alltoall
        :align: center

    Args:
        in_tensor_list (List[Tensor]): List of tensors to scatter, one per rank. The data type of each tensor
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        out_tensor_list (List[Tensor]): List of tensors to gather the results into, one per rank. The data type of each tensor should be the same as that of the input tensors.
        group (Group, optional): The group instance returned by ``new_group``, or None for the global default group. Default: None.
        sync_op (bool, optional): Whether this op is a sync op. Default: True.

    Returns:
        A task object.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> out_tensor_list = []
            >>> if dist.get_rank() == 0:
            ...     data1 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
            ...     data2 = paddle.to_tensor([[7, 8, 9], [10, 11, 12]])
            ... else:
            ...     data1 = paddle.to_tensor([[13, 14, 15], [16, 17, 18]])
            ...     data2 = paddle.to_tensor([[19, 20, 21], [22, 23, 24]])
            >>> dist.alltoall([data1, data2], out_tensor_list)
            >>> print(out_tensor_list)
            >>> # [[[1, 2, 3], [4, 5, 6]], [[13, 14, 15], [16, 17, 18]]] (2 GPUs, out for rank 0)
            >>> # [[[7, 8, 9], [10, 11, 12]], [[19, 20, 21], [22, 23, 24]]] (2 GPUs, out for rank 1)
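            >>> # A minimal sketch of the asynchronous form (same 2-GPU setup assumed):
            >>> # with sync_op=False a task is returned that must be waited on
            >>> # before out_tensor_list is read.
            >>> task = dist.alltoall([data1, data2], out_tensor_list, sync_op=False)
            >>> task.wait()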
    F)r   Úalltoall)Zin_tensor_listZout_tensor_listÚgroupÚsync_op© r   úl/var/www/html/Deteccion_Ine/venv/lib/python3.10/site-packages/paddle/distributed/communication/all_to_all.pyr      s   *
def alltoall_single(
    in_tensor,
    out_tensor,
    in_split_sizes=None,
    out_split_sizes=None,
    group=None,
    sync_op=True,
):
    """
    Scatter a single input tensor to all participants and gather the received tensors into ``out_tensor``.

    Note:
        ``alltoall_single`` is only supported in eager mode.

    Args:
        in_tensor (Tensor): Input tensor. The data type should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        out_tensor (Tensor): Output tensor. The data type should be the same as that of the input tensor.
        in_split_sizes (list[int], optional): Split sizes of ``in_tensor`` along dim[0]. If not given, dim[0] of ``in_tensor``
            must be divisible by the group size and ``in_tensor`` is scattered evenly across all participants. Default: None.
        out_split_sizes (list[int], optional): Split sizes of ``out_tensor`` along dim[0]. If not given, dim[0] of ``out_tensor``
            must be divisible by the group size and ``out_tensor`` is gathered evenly from all participants. Default: None.
        group (Group, optional): The group instance returned by ``new_group``, or None for the global default group. Default: None.
        sync_op (bool, optional): Whether this op is a sync op. Default: True.

    Returns:
        A task object.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> rank = dist.get_rank()
            >>> size = dist.get_world_size()

            >>> # case 1 (2 GPUs)
            >>> data = paddle.arange(2, dtype='int64') + rank * 2
            >>> # data for rank 0: [0, 1]
            >>> # data for rank 1: [2, 3]
            >>> output = paddle.empty([2], dtype='int64')
            >>> dist.alltoall_single(data, output)
            >>> print(output)
            >>> # output for rank 0: [0, 2]
            >>> # output for rank 1: [1, 3]
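            >>> # Each rank splits `data` into world_size equal chunks, sends
            >>> # chunk i to rank i, and concatenates what it receives, so rank r
            >>> # ends up with element r from every rank.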

            >>> # case 2 (2 GPUs)
            >>> in_split_sizes = [i + 1 for i in range(size)]
            >>> # in_split_sizes for rank 0: [1, 2]
            >>> # in_split_sizes for rank 1: [1, 2]
            >>> out_split_sizes = [rank + 1 for i in range(size)]
            >>> # out_split_sizes for rank 0: [1, 1]
            >>> # out_split_sizes for rank 1: [2, 2]
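            >>> # Note how the sizes pair up across ranks: out_split_sizes[j] on
            >>> # rank r equals in_split_sizes[r] on rank j, i.e. the number of
            >>> # rows rank j sends to rank r.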
            >>> data = paddle.ones([sum(in_split_sizes), size], dtype='float32') * rank
            >>> # data for rank 0: [[0., 0.], [0., 0.], [0., 0.]]
            >>> # data for rank 1: [[1., 1.], [1., 1.], [1., 1.]]
            >>> output = paddle.empty([(rank + 1) * size, size], dtype='float32')
            >>> group = dist.new_group([0, 1])
            >>> task = dist.alltoall_single(data,
            ...                             output,
            ...                             in_split_sizes,
            ...                             out_split_sizes,
            ...                             sync_op=False,
            ...                             group=group)
            >>> task.wait()
            >>> print(output)
            >>> # output for rank 0: [[0., 0.], [1., 1.]]
            >>> # output for rank 1: [[0., 0.], [0., 0.], [1., 1.], [1., 1.]]

    F)r   Úalltoall_single)Z	in_tensorZ
out_tensorZin_split_sizesZout_split_sizesr   r   r   r   r   r   A   s   Hùr   )NT)NNNT)Z paddle.distributed.communicationr   r   r   r   r   r   r   Ú<module>   s   
2ú