import numpy as np

import paddle
from paddle import framework
from paddle.distributed.communication import stream

from .serialization_utils import (
    convert_object_to_tensor,
    convert_tensor_to_object,
)


def all_gather(tensor_list, tensor, group=None, sync_op=True):
    """

    Gather tensors from all participants so that every participant gets the
    result. As shown below, each process is started with one GPU, and the data
    held by each process is represented by its group rank. Through the
    all_gather operator, every GPU ends up with the data from all GPUs.

    .. image:: https://githubraw.cdn.bcebos.com/PaddlePaddle/docs/develop/docs/api/paddle/distributed/img/allgather.png
        :width: 800
        :alt: all_gather
        :align: center

    Args:
        tensor_list (list): A list of output Tensors. Every element in the list must be a Tensor whose data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        tensor (Tensor): The Tensor to send. Its data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        group (Group, optional): The group instance returned by new_group, or None for the global default group.
        sync_op (bool, optional): Whether this op is a sync op. The default value is True.

    Returns:
        None.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> tensor_list = []
            >>> if dist.get_rank() == 0:
            ...     data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
            ... else:
            ...     data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
            >>> dist.all_gather(tensor_list, data)
            >>> print(tensor_list)
            >>> # [[[4, 5, 6], [4, 5, 6]], [[1, 2, 3], [1, 2, 3]]] (2 GPUs)
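
    Note:
        The example above must be launched with multiple processes (here, two),
        e.g. via ``python -m paddle.distributed.launch --gpus=0,1 demo.py``,
        where ``demo.py`` is a placeholder name for a script containing the code.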
    """
    return stream.all_gather(tensor_list, tensor, group, sync_op)


def all_gather_object(object_list, obj, group=None):
    """

    Gather picklable objects from all participants so that every participant gets the result. Similar to all_gather(), but any picklable Python object can be passed in.

    Args:
        object_list (list): A list of gathered objects. The data type of every element in the list is the same as that of the input obj.
        obj (Any): The picklable object to send.
        group (Group, optional): The group instance returned by new_group, or None for the global default group.

    Returns:
        None.

    Warning:
        This API only supports the dygraph mode.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> object_list = []
            >>> if dist.get_rank() == 0:
            ...     obj = {"foo": [1, 2, 3]}
            ... else:
            ...     obj = {"bar": [4, 5, 6]}
            >>> dist.all_gather_object(object_list, obj)
            >>> print(object_list)
            >>> # [{'foo': [1, 2, 3]}, {'bar': [4, 5, 6]}] (2 GPUs)
    """
    assert (
        framework.in_dynamic_mode()
    ), "all_gather_object doesn't support static graph mode."

    # serialize the object into a 1-D uint8 tensor and record its byte length
    tensor, len_of_tensor = convert_object_to_tensor(obj)

    # gather every rank's serialized length
    list_len_of_tensor = []
    all_gather(list_len_of_tensor, len_of_tensor, group)

    # pad the local buffer to the maximum length across ranks, so that
    # all_gather does not hang when payload sizes differ between ranks
    max_len_of_tensor = int(max(list_len_of_tensor).item())
    numpy_data = tensor.numpy()
    numpy_data = np.resize(numpy_data, [max_len_of_tensor])
    input_tensor = paddle.to_tensor(numpy_data)

    # gather the padded buffers and deserialize each one back into an object
    tensor_list = []
    all_gather(tensor_list, input_tensor, group)
    for i, tensor in enumerate(tensor_list):
        object_list.append(
            convert_tensor_to_object(tensor, list_len_of_tensor[i])
        )