import numpy as np

import paddle
import paddle.distributed as dist
from paddle.distributed import framework
from paddle.distributed.communication import stream

from .serialization_utils import (
    convert_object_to_tensor,
    convert_tensor_to_object,
)


def scatter(tensor, tensor_list=None, src=0, group=None, sync_op=True):
    """

    Scatter a tensor to all participants. As shown below, each process runs on one GPU and the source of the scatter
    is GPU0. Through the scatter operator, the data on GPU0 is split and sent evenly to all GPUs.

    .. image:: https://githubraw.cdn.bcebos.com/PaddlePaddle/docs/develop/docs/api/paddle/distributed/img/scatter.png
        :width: 800
        :alt: scatter
        :align: center

    Args:
        tensor (Tensor): The output Tensor. Its data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        tensor_list (list|tuple, optional): A list/tuple of Tensors to scatter. Every element in the list must be a Tensor whose data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16. Default value is None.
        src (int): The source rank id. Default value is 0.
        group (Group, optional): The group instance returned by new_group, or None for the global default group.
        sync_op (bool, optional): Whether this op is a sync op. The default value is True.

    Returns:
        None.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> if dist.get_rank() == 0:
            ...     data1 = paddle.to_tensor([7, 8, 9])
            ...     data2 = paddle.to_tensor([10, 11, 12])
            ...     dist.scatter(data1, src=1)
            ... else:
            ...     data1 = paddle.to_tensor([1, 2, 3])
            ...     data2 = paddle.to_tensor([4, 5, 6])
            ...     dist.scatter(data1, tensor_list=[data1, data2], src=1)
            >>> print(data1, data2)
            >>> # [1, 2, 3] [10, 11, 12] (2 GPUs, out for rank 0)
            >>> # [4, 5, 6] [4, 5, 6] (2 GPUs, out for rank 1)
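
            >>> # A minimal sketch of scattering within an explicitly created
            >>> # subgroup instead of the global default group (the two-rank
            >>> # setup is an assumption):
            >>> group = dist.new_group([0, 1])
            >>> dist.scatter(data1, tensor_list=[data1, data2], src=1, group=group)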
    """
    return stream.scatter(tensor, tensor_list, src, group, sync_op)


def scatter_object_list(out_object_list, in_object_list=None, src=0, group=None):
    """

    Scatter picklable objects from the source rank to all other ranks. Similar to scatter(), but Python objects can be passed in.

    Args:
        out_object_list (list): The list in which to store the scattered objects. It is cleared and refilled in place.
        in_object_list (list, optional): The list of objects to scatter. Only objects on the src rank will be scattered. Default value is None.
        src (int): The source rank in global view. Default value is 0.
        group (Group, optional): The group instance returned by new_group, or None for the global default group.

    Returns:
        None.

    Warning:
        This API only supports the dygraph mode.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> out_object_list = []
            >>> if dist.get_rank() == 0:
            ...     in_object_list = [{'foo': [1, 2, 3]}, {'foo': [4, 5, 6]}]
            ... else:
            ...     in_object_list = [{'bar': [1, 2, 3]}, {'bar': [4, 5, 6]}]
            >>> dist.scatter_object_list(out_object_list, in_object_list, src=1)
            >>> print(out_object_list)
            >>> # [{'bar': [1, 2, 3]}] (2 GPUs, out for rank 0)
            >>> # [{'bar': [4, 5, 6]}] (2 GPUs, out for rank 1)
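
            >>> # Objects are serialized and padded to a common size internally,
            >>> # so the scattered objects may differ in size. A minimal sketch
            >>> # (the payloads are assumptions):
            >>> out = []
            >>> dist.scatter_object_list(out, [{'k': 'tiny'}, {'k': 'a longer payload'}], src=0)
            >>> # each rank receives exactly one deserialized object in `out`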
    """
    assert (
        framework.in_dynamic_mode()
    ), "scatter_object_list doesn't support static graph mode."

    rank = dist.get_rank()
    in_obj_tensors = []
    in_obj_sizes = []
    if rank == src:
        # Serialize each object into a uint8 tensor and record its byte size.
        for obj in in_object_list:
            obj_tensor, obj_size = convert_object_to_tensor(obj)
            in_obj_tensors.append(obj_tensor)
            in_obj_sizes.append(obj_size)
        max_obj_size_tensor = max(in_obj_sizes)
    else:
        max_obj_size_tensor = paddle.empty([], dtype="int64")
    dist.broadcast(max_obj_size_tensor, src)
    max_obj_size = int(max_obj_size_tensor.item())

    # Pad every serialized tensor to the broadcast maximum size so that all
    # ranks exchange buffers of identical shape.
    in_tensor_list = []
    for tensor in in_obj_tensors:
        numpy_data = tensor.numpy()
        numpy_data = np.resize(numpy_data, [max_obj_size])
        in_tensor = paddle.to_tensor(numpy_data)
        in_tensor_list.append(in_tensor)
    out_tensor = paddle.empty([max_obj_size], dtype="uint8")
    scatter(out_tensor, in_tensor_list if rank == src else None, src, group)

    # Scatter the true (unpadded) sizes so each rank can deserialize exactly
    # its own payload.
    out_tensor_size = paddle.empty([], dtype="int64")
    scatter(out_tensor_size, in_obj_sizes if rank == src else None, src, group)

    out_object_list.clear()
    out_object_list.append(
        convert_tensor_to_object(out_tensor, out_tensor_size.item())
    )