import paddle
import paddle.distributed as dist
from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import _get_global_group


def _all_gather_into_tensor_in_dygraph(
    out_tensor, in_tensor, group, sync_op, use_calc_stream
):
    group = _get_global_group() if group is None else group

    if use_calc_stream:
        return group.process_group.all_gather_into_tensor_on_calc_stream(
            out_tensor, in_tensor
        )

    task = group.process_group.all_gather_into_tensor(
        out_tensor, in_tensor, sync_op
    )
    if sync_op:
        task.wait()

    return task


def _all_gather_in_dygraph(
    tensor_list, tensor, group, sync_op, use_calc_stream
):
    group = _get_global_group() if group is None else group

    # An empty output list is filled with one placeholder tensor per rank.
    if len(tensor_list) == 0:
        tensor_list += [paddle.empty_like(tensor) for _ in range(group.nranks)]

    if use_calc_stream:
        return group.process_group.all_gather_on_calc_stream(
            tensor_list, tensor
        )

    task = group.process_group.all_gather(tensor_list, tensor, sync_op)
    if sync_op:
        task.wait()

    return task
def _all_gather_in_static_mode(tensor_list, tensor, group, sync_op):
    op_type = 'c_allgather'
    helper = framework.LayerHelper(op_type, **locals())
    out = helper.create_variable_for_type_inference(dtype=tensor.dtype)
    supported_dtypes = [
        'float16',
        'float32',
        'float64',
        'int32',
        'int64',
        'bool',
        'int8',
        'uint8',
    ]
    for elem in tensor_list:
        data_feeder.check_variable_and_dtype(
            elem, 'tensor_list', supported_dtypes, 'all_gather'
        )
    data_feeder.check_variable_and_dtype(
        tensor, 'tensor', supported_dtypes, 'all_gather'
    )

    ring_id = 0 if group is None else group.id
    nranks = dist.get_world_size()
    helper.append_op(
        type=op_type,
        inputs={'X': [tensor]},
        outputs={'Out': [out]},
        attrs={
            'ring_id': ring_id,
            'use_calc_stream': sync_op,
            'nranks': nranks,
        },
    )
    tensor_list.clear()
    # 0-D inputs are recovered with unstack; others are split along axis 0.
    if len(tensor.shape) == 0:
        tensor_list.extend(paddle.unstack(out, 0))
    else:
        tensor_list.extend(paddle.split(out, nranks, 0))


def all_gather(
    tensor_or_tensor_list,
    tensor,
    group=None,
    sync_op=True,
    use_calc_stream=False,
):
    """

    Gather tensors across devices to a correctly-sized tensor or a tensor list.

    Args:
        tensor_or_tensor_list (Union[Tensor, List[Tensor]]): The output. If it is a tensor, it should be correctly-sized. If it is a list, it
            should be empty or contain correctly-sized tensors.
        tensor (Tensor): The input tensor on each rank. The result will overwrite this tensor after communication. Support
            float16, float32, float64, int32, int64, int8, uint8 or bool as the input data type.
        group (Group, optional): The group to communicate in. If none is given, use the global group as default.
        sync_op (bool, optional): Indicate whether the communication is sync or not. If none is given, use true as default.
        use_calc_stream (bool, optional): Indicate whether the communication is done on the calculation stream. If none is given, use false as default. This
            option is designed for high-performance scenarios; do not turn it on unless you clearly understand its meaning.

    Returns:
        Return a task object.

    Warning:
        This API only supports the dygraph mode now.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> local_rank = dist.get_rank()
            >>> tensor_list = []
            >>> if local_rank == 0:
            ...     data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
            ... else:
            ...     data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
            >>> task = dist.stream.all_gather(tensor_list, data, sync_op=False)
            >>> task.wait()
            >>> print(tensor_list)
            [[[4, 5, 6], [4, 5, 6]], [[1, 2, 3], [1, 2, 3]]] (2 GPUs)
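
            The output may also be a single, correctly-sized tensor, in which
            case the per-rank inputs are gathered along the first axis. A
            minimal sketch, assuming the same two-rank setup as above:

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> # With two [2, 3] inputs, the gathered output is [4, 3].
            >>> out = paddle.empty([4, 3], dtype=data.dtype)
            >>> task = dist.stream.all_gather(out, data)
            >>> print(out)
            [[4, 5, 6], [4, 5, 6], [1, 2, 3], [1, 2, 3]] (2 GPUs)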
    """
    if group is not None and not group.is_member():
        raise RuntimeError(
            "The group should not be None and all ranks which invoke this "
            "operation should be the member of this group."
        )

    if not sync_op and use_calc_stream:
        raise RuntimeError(
            "use_calc_stream can only be true in sync op behavior."
        )

    if framework.in_dynamic_mode():
        # A tensor output gathers into one contiguous tensor; a list output
        # gathers into per-rank tensors.
        if paddle.is_tensor(tensor_or_tensor_list):
            return _all_gather_into_tensor_in_dygraph(
                tensor_or_tensor_list, tensor, group, sync_op, use_calc_stream
            )
        return _all_gather_in_dygraph(
            tensor_or_tensor_list, tensor, group, sync_op, use_calc_stream
        )

    assert group is None, "Group can not be used in static graph mode for now."
    if paddle.is_tensor(tensor_or_tensor_list):
        raise RuntimeError(
            "Only support passing a tensor list to `all_gather` "
            "in static graph mode now."
        )
    return _all_gather_in_static_mode(
        tensor_or_tensor_list, tensor, group, sync_op
    )