import paddle
from paddle.base import core, framework
from paddle.base.backward import gradients_with_optimizer  # noqa: F401

__all__ = []


@framework.dygraph_only
def backward(tensors, grad_tensors=None, retain_graph=False):
    """
    Compute the backward gradients of given tensors.

    Args:
        tensors(list of Tensors): the tensors for which the gradients are to be computed. The list must not contain the same tensor more than once.

        grad_tensors(list of Tensors or None, optional): the initial gradients of ``tensors``. If not None, it must have the same length as ``tensors``,
            and if any of its elements is None, the corresponding initial gradient is the default value, a tensor filled with 1.0.
            If None, all the initial gradients of ``tensors`` are the default value, tensors filled with 1.0.
            Defaults to None.

        retain_graph(bool, optional): If False, the graph used to compute grads will be freed after the call. If you would
            like to add more ops to the built graph, or run backward on it again, after calling this method ( :code:`backward` ),
            set :code:`retain_graph` to True so that the graph is retained. Setting it to False is much more memory-efficient.
            Defaults to False.

    Returns:
        NoneType: None


    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32', stop_gradient=False)
            >>> y = paddle.to_tensor([[3, 2], [3, 4]], dtype='float32')

            >>> grad_tensor1 = paddle.to_tensor([[1,2], [2, 3]], dtype='float32')
            >>> grad_tensor2 = paddle.to_tensor([[1,1], [1, 1]], dtype='float32')

            >>> z1 = paddle.matmul(x, y)
            >>> z2 = paddle.matmul(x, y)

            >>> paddle.autograd.backward([z1, z2], [grad_tensor1, grad_tensor2], True)
            >>> print(x.grad)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[12., 18.],
             [17., 25.]])


            >>> x.clear_grad()

            >>> paddle.autograd.backward([z1, z2], [grad_tensor1, None], True)
            >>> print(x.grad)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[12., 18.],
             [17., 25.]])

            >>> x.clear_grad()

            >>> paddle.autograd.backward([z1, z2])
            >>> print(x.grad)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[10., 14.],
             [10., 14.]])
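
            >>> x.clear_grad()

            >>> # Illustrative extra case: a single Tensor (not wrapped in a list)
            >>> # is also accepted. The expected values below are computed from the
            >>> # same x and y as above with a ones-filled initial gradient,
            >>> # i.e. x.grad = ones(2, 2) @ y.T.
            >>> z3 = paddle.matmul(x, y)
            >>> paddle.autograd.backward(z3)
            >>> print(x.grad)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[5., 7.],
             [5., 7.]])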

    """

    def check_tensors(in_out_list, name):
        # Accept either a single Tensor or a non-empty list/tuple of Tensors,
        # and always return a list of Tensors.
        assert in_out_list is not None, f"{name} should not be None"

        if isinstance(in_out_list, (list, tuple)):
            assert len(in_out_list) > 0, f"{name} cannot be empty"
            for each_var in in_out_list:
                assert isinstance(
                    each_var, (paddle.Tensor, core.eager.Tensor)
                ), f"Elements of {name} must be paddle.Tensor"
            return in_out_list
        else:
            assert isinstance(
                in_out_list, (paddle.Tensor, core.eager.Tensor)
            ), f"{name} must be Tensor or list of Tensor"
            return [in_out_list]

    tensors = check_tensors(tensors, "tensors")

    assert len(tensors) == len(set(tensors)), (
        "The argument 'tensors' of paddle.autograd.backward contains "
        "duplicate paddle.Tensor object."
    )

    if grad_tensors is not None:
        if not isinstance(grad_tensors, (list, tuple)):
            grad_tensors = [grad_tensors]

        for each_tensor in grad_tensors:
            if each_tensor is not None:
                assert isinstance(
                    each_tensor, (paddle.Tensor, core.eager.Tensor)
                ), (
                    "The argument 'grad_tensors' of paddle.autograd.backward "
                    "is invalid, it can be 'None', 'paddle.Tensor' or "
                    "'list[None/paddle.Tensor]'."
                )
    else:
        grad_tensors = []

    if len(grad_tensors) > 0:
        assert len(tensors) == len(
            grad_tensors
        ), "The length of grad_tensors must be equal to tensors"

    assert isinstance(retain_graph, bool), "retain_graph must be True or False"

    # Run the actual backward pass in dygraph (eager) mode.
    core.eager.run_backward(tensors, grad_tensors, retain_graph)