o
    "j4                     @   s  d dl mZ d dlmZmZ d dlZd dlZd dlm	Z	 ddl
mZmZ ddlmZ ddlmZ dd	lmZ dd
lmZmZmZ d"ddZ	d#ddZdadd Zdd Zi ZG dd dZdd Zd$ddZd%ddZ da!dd Z"deee#e$f  fd d!Z%dS )&    )reduce)ListTupleN)core   )ProcessMeshget_current_process_mesh)get_default_distributed_context)DistributedOperatorHelper)DistributedTensor)__no_shape_var_type__convert_to_dims_mappingverify_shard_specc                 C   s2  |durt |tjsJ d| dnt }|dusJ dt |ts+J d| dt | tr?tj 	 
| } t| }nt| }|j}||j_|jtv rRg }n|j}|durqt|||sjJ d|j|||t|||j_|dur{|jd |dur|jd	 t }|| || }|| | S )
a  
    Shard a tensor on a process mesh according to the shard specification.

    Args:
        x (Tensor): the tensor to be sharded.
        process_mesh (ProcessMesh, optional): An instance of ProcessMesh describes a mesh
            topology of the used logical processes where the tensor is sharded. If it is None,
            the found current process mesh will be used. And an error will be raised if the
            current process mesh cannot be found. Default: None.
        shard_spec (list, optional): a list to describe the sharding mapping between `x` and `process_mesh`,
            which means the dimension `i` of `x` is split across the dimension `shard_spec[i]` of `process_mesh`,
            where `None` means that tensor dimension is not split. For example, given a tensor wih
            the shape [6, 12] and a process mesh with the shape [2, 3] and the dimension names ["x", "y"]:
                If `shard_spec=["x", "y"]`, each shard of the tensor will have a shape [3, 4];
                If `shard_spec=["y", "x"]`, each shard of the tensor will have a shape [2, 6];
                If `shard_spec=["x", None]`, each shard of the tensor will have a shape [3, 12];
                If `shard_spec=[None, "x"]`, each shard of the tensor will have a shape [6, 4];
                If `shard_spec=["y", None]`, each shard of the tensor will have a shape [2, 12];
                If `shard_spec=[None, "y"]`, each shard of the tensor will have a shape [6, 4];
                If `shard_spec=[None, None]`, each shard of the tensor will have a shape [6, 12];
        If the `shard_spec` is None, the tensor will be replicated across all the processes of `process_mesh`.
        In the above example, the `shard_spec=None` is same as 'shard_spec=[None, None]'. Defaults: None.

    Returns:
        Tensor: the tensor `x` annotated with sharding information.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
            >>> import paddle
            >>> from paddle.distributed.fleet import auto

            >>> mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
            >>> x = paddle.ones([4, 6])
            >>> shard_spec = ["x", "y"]
            >>> auto.shard_tensor(x, mesh, shard_spec)

    NArgument process_mesh " is not an instance of ProcessMeshKSpecify the process mesh argument or use ProcessMesh context manager first.zArgument shard_spec z is not an instance of listzQFor tensor {}, shard_spec {} is invalid with tensor_shape {} and process_mesh {}.process_meshdims_mapping)
isinstancer   r   r   liststrpaddlestaticdefault_main_programZglobal_blockZ_var_recursiver   serial_tensorZ	dist_attrr   typer   shaper   formatnamer   r   Zmark_annotatedr	   Zadd_dist_tensor_for_programZget_dist_tensor_for_programZadd_process_mesh)xr   
shard_specZdist_tensorr   Ztensor_shapeZdefault_dist_ctx r!   k/var/www/html/Deteccion_Ine/venv/lib/python3.10/site-packages/paddle/distributed/auto_parallel/interface.pyshard_tensor"   s^   )









r#   c                 K   s  |durt |tsJ d| dnt }|dusJ dg }|durKtdd |D s4J d| d|D ]}|durE|t|| q6|d q6g }|durytd	d |D sbJ d
| d|D ]}|durs|t|| qd|d qdt| ||||} | S )aj	  
    Shard an operation on a process mesh according to its input and output shard specification.

    Args:
        op (Callable): a callable operator or module to be sharded.
        process_mesh (ProcessMesh, optional): An instance of ProcessMesh describes a mesh
            topology of the used logical processes where the op is sharded. All of its inputs and
            outputs are sharded by this process mesh. If it is None, the found current process mesh
            will be used. And an error will be raised if the current process mesh cannot be found.
            Default: None.
        in_shard_specs (list of list, optional): a list of list to describe the sharding specifications
            for the inputs. Each item of `in_shard_specs` is a `shard_spec` between the corresponding input
            and `process_mesh`. If one item is None, the corresponding input is replicated across all processes
            If it is None, all inputs are replicated across all processes. Note that the length of the
            `in_shard_specs` should be equal to the actual number of inputs when calling this operation.
            Default: None.
        out_shard_specs (list of list, optional): a list of list to describe the sharding specifications
            for the outputs. Each item of `out_shard_specs` is a `shard_spec` between the corresponding output
            and `process_mesh`. If one item is None, the corresponding output is replicated across all processes
            If it is None, all outputs are replicated across all processes. Note that the length of the
            `in_shard_specs` should be equal to the actual number of inputs when calling this operation.
            Default: None. Default: None.

    Returns:
        Outputs of `op`, each of which is annotated with sharding information.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.distributed.fleet import auto

            >>> x = paddle.ones([4, 6])
            >>> y = paddle.zeros([4, 6])
            >>> mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
            >>> dist_add = auto.shard_op(paddle.add,
            ...                          mesh,
            ...                          in_shard_specs=[["x", "y"], ["y", None]],
            ...                          out_shard_specs=[[None, "x"]])
            >>> dist_add(x, y)

    Nr   r   r   c                 s   "    | ]}t |tp|d u V  qd S Nr   r   .0r    r!   r!   r"   	<genexpr>   
    
zshard_op.<locals>.<genexpr>zin_shard_spec z is not a list of list or Nonec                 s   r$   r%   r&   r'   r!   r!   r"   r)      r*   zout_shard_spec )r   r   r   allappendr   r
   )opr   Zin_shard_specsZout_shard_specskwargsZin_dims_mappingsr    Zout_dims_mappingsr!   r!   r"   shard_opz   sP   .






r/   c                 C   s   t d7 a G dd d}|| S )Nr   c                   @      e Zd Zdd Zdd ZdS )z$recompute.<locals>.RecomputeOperatorc                 S   
   || _ d S r%   )_op)selfr-   r!   r!   r"   __init__      
z-recompute.<locals>.RecomputeOperator.__init__c           
      _   s   t j }| }t|j}| j|i |}t|j}t||D ]*}|j| }	|	dr@d|		dv r@|	
ddtt d  q |	
ddtt  q |S )Nop_namescopezauto_parallel/exclude_rcz/auto_parallel/rc_Z_exclude_rc)r   r   r   current_blocklenopsr3   rangeZhas_attrattr	_set_attrr   _g_recompute_idx
r4   argsr.   Zdefault_progZ	cur_blockZop_sizeoutputZnew_op_sizeidxr-   r!   r!   r"   __call__   s0   




z-recompute.<locals>.RecomputeOperator.__call__N__name__
__module____qualname__r5   rC   r!   r!   r!   r"   RecomputeOperator       rH   )r>   )r-   rH   r!   r!   r"   	recompute   s   rJ   c                 C   s   G dd d}|| S )z
    Exclude some operators in recompute segements.
        Args:
        run_function (callabe): The callabe function to be excluded.

    Returns:
        ExcludeOperator: The callable object.

    c                   @   r1   )z1exclude_ops_in_recompute.<locals>.ExcludeOperatorc                 S   r2   r%   )_run_function)r4   run_functionr!   r!   r"   r5     r6   z:exclude_ops_in_recompute.<locals>.ExcludeOperator.__init__c           
      _   s`   t j }| }t|j}| j|i |}t|j}t||D ]}|j| }	|	dd q |S )Nr7   z/auto_parallel/exclude_rc)	r   r   r   r8   r9   r:   rK   r;   r=   r?   r!   r!   r"   rC     s   



z:exclude_ops_in_recompute.<locals>.ExcludeOperator.__call__NrD   r!   r!   r!   r"   ExcludeOperator  rI   rM   r!   )rL   rM   r!   r!   r"   exclude_ops_in_recompute   s   rN   c                   @   s   e Zd ZdZdZdS )CollectionNamesZfetchesloggingN)rE   rF   rG   FETCHESLOGGINGr!   r!   r!   r"   rO     s    rO   c                 C   s(   t | d }|d u rg }|t | < t |  S r%   )_g_collectionsget)r   Z
collectionr!   r!   r"   get_collection   s
   rU   c                 C   s   | t vrg t | < |d ur't |  D ]\}}||kr d S qt |  ||f d S t |  D ]\}}||kr6 d S q+t |  d |f d S r%   )rS   r,   )Zcollection_namevaluer   _vr!   r!   r"   add_to_collection(  s   rY   Fc                 C   s`   t | tjjr| j} nt | tr| } n	tdt| t	t
j| | |r.t	t
j| | d S d S )NzGOnly support fetch `Variable` or `str`[`Variable`'s name], but got `{}`)r   r   r   Variabler   r   	TypeErrorr   r   rY   rO   rQ   rR   )Ztensorr   rP   r!   r!   r"   fetch7  s   
r\   c                   C   s   t S r%   )_g_meshr!   r!   r!   r"   get_meshJ  s   r^   	mesh_dimsc                 C   sH   dd | D }dd | D }t dtdd |d|}t||atS )z
    Create a global process_mesh for auto parallel.

    Args:
        mesh_dims (list[tuple[str, int]]): A list of tuple, each element is (dim_name, dim_degree).
    c                 S      g | ]}|d  qS )r   r!   r(   Zmesh_dimr!   r!   r"   
<listcomp>W      zcreate_mesh.<locals>.<listcomp>c                 S   r`   )r   r!   ra   r!   r!   r"   rb   X  rc   r   c                 S   s   | | S r%   r!   )r   yr!   r!   r"   <lambda>Y  s    zcreate_mesh.<locals>.<lambda>r   )npZaranger   Zreshaper   r]   )r_   Z	dim_namesZ
mesh_shapeZmesh_arrr!   r!   r"   create_meshO  s   
rg   )NN)NNNr%   )NF)&	functoolsr   typingr   r   numpyrf   r   Zpaddle.frameworkr   r   r   r   Zstatic.dist_contextr	   Zstatic.dist_opr
   Zstatic.dist_tensorr   Zstatic.utilsr   r   r   r#   r/   r>   rJ   rN   rS   rO   rU   rY   r\   r]   r^   r   intrg   r!   r!   r!   r"   <module>   s0   
Y
W%

