from paddle.distributed.communication import stream
from paddle.distributed.communication.reduce import ReduceOp
from paddle.distributed.communication.stream.reduce_scatter import (
    _reduce_scatter_base as _reduce_scatter_base_stream,
)


def reduce_scatter(tensor, tensor_list, op=ReduceOp.SUM, group=None, sync_op=True):
    """
    Reduces, then scatters a list of tensors to all processes in a group.

    Args:
        tensor (Tensor): The output tensor on each rank. The result will overwrite this tensor after communication. Its data type
            should be float16, float32, float64, int32, int64, int8, uint8 or bool.
        tensor_list (List[Tensor]): List of tensors to reduce and scatter. The data type of every element in the list
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.MIN|ReduceOp.PROD, optional): The reduction used. If none is given, use ReduceOp.SUM as default.
        group (Group, optional): The group to communicate in. If none is given, use the global group as default.
        sync_op (bool, optional): Indicate whether the communication is sync or not. If none is given, use True as default.

    Returns:
        Return a task object.

    Warning:
        This API only supports the dygraph mode.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> if dist.get_rank() == 0:
            ...     data1 = paddle.to_tensor([0, 1])
            ...     data2 = paddle.to_tensor([2, 3])
            ... else:
            ...     data1 = paddle.to_tensor([4, 5])
            ...     data2 = paddle.to_tensor([6, 7])
            >>> dist.reduce_scatter(data1, [data1, data2])
            >>> print(data1)
            >>> # [4, 6] (2 GPUs, out for rank 0)
            >>> # [8, 10] (2 GPUs, out for rank 1)

    """
    return stream.reduce_scatter(
        tensor,
        tensor_list,
        op=op,
        group=group,
        sync_op=sync_op,
        use_calc_stream=False,
    )


def _reduce_scatter_base(output, input, op=ReduceOp.SUM, group=None, sync_op=True):
    """
    Reduces, then scatters a flattened tensor to all processes in a group.

    Args:
        output (Tensor): Output tensor. Its data type should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        input (Tensor): Input tensor that is of size output tensor size times world size. Its data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.MIN|ReduceOp.PROD, optional): The reduction used. Default: ReduceOp.SUM.
        group (ProcessGroup, optional): The process group to work on. If None,
            the default process group will be used.
        sync_op (bool, optional): Whether this op is a sync op. The default value is True.

    Returns:
        Async task handle, if sync_op is set to False.
        None, if sync_op is True or if the current rank is not part of the group.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> rank = dist.get_rank()
            >>> data = paddle.arange(4) + rank
            >>> # [0, 1, 2, 3] (2 GPUs, for rank 0)
            >>> # [1, 2, 3, 4] (2 GPUs, for rank 1)
            >>> output = paddle.empty(shape=[2], dtype=data.dtype)
            >>> dist.collective._reduce_scatter_base(output, data)
            >>> print(output)
            >>> # [1, 3] (2 GPUs, out for rank 0)
            >>> # [5, 7] (2 GPUs, out for rank 1)

    """
    return _reduce_scatter_base_stream(
        output,
        input,
        op=op,
        group=group,
        sync_op=sync_op,
        use_calc_stream=False,
    )