import paddle
from paddle.base import core

__all__ = []


def with_mateclass(meta, *bases):
    # ``with_metaclass`` idiom: return a temporary class such that subclassing
    # it constructs the real class through ``meta`` with the given ``bases``.
    class impl(meta):
        def __new__(cls, name, temp_bases, attrs):
            return meta(name, bases, attrs)

    return type.__new__(impl, 'impl', (), {})


class PyLayerContext:
    """
    ``PyLayerContext`` can assist the :ref:`api_paddle_autograd_PyLayer` in implementing certain functionalities.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.autograd import PyLayer

            >>> class cus_tanh(PyLayer):
            ...     @staticmethod
            ...     def forward(ctx, x):
            ...         # ctx is an object of PyLayerContext.
            ...         y = paddle.tanh(x)
            ...         ctx.save_for_backward(y)
            ...         return y
            ...
            ...     @staticmethod
            ...     def backward(ctx, dy):
            ...         # ctx is an object of PyLayerContext.
            ...         y, = ctx.saved_tensor()
            ...         grad = dy * (1 - paddle.square(y))
            ...         return grad
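
        The layer defined above is then run through its ``apply`` method; a short
        usage sketch, mirroring the ``PyLayer`` example later in this module:

        .. code-block:: python

            >>> paddle.seed(2023)
            >>> data = paddle.randn([2, 3], dtype="float64")
            >>> data.stop_gradient = False
            >>> z = cus_tanh.apply(data)
            >>> z.mean().backward()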
    """

    def save_for_backward(self, *tensors):
        """
        Saves given tensors that `backward` needs. Use ``saved_tensor`` in the `backward` to get the saved tensors.

        Note:
            This API should be called at most once, and only inside `forward`.

        Args:
            tensors(list of Tensors): Tensors to be stored.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer

                >>> class cus_tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         # ctx is a context object that stores some objects for backward.
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         return grad

        """
        self.container = tensors

    def saved_tensor(self):
        """
        Get the tensors stored by ``save_for_backward``.

        Returns:
            list of Tensors or None: If the context contains tensors stored by `save_for_backward`,
            then return these tensors; otherwise return None.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer

                >>> class cus_tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         # ctx is a context object that stores some objects for backward.
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         return grad
        """
        return self.container

    def mark_not_inplace(self, *args):
        """
        Marks inputs as not inplace.
        This should be called at most once, only from inside the `forward` method,
        and all arguments should be Tensor inputs.

        If a Tensor returned by the `forward` method is the same object as a Tensor passed
        into `forward`, and this Tensor is marked as not_inplace, then Paddle will create a
        new Tensor as the output, thereby preventing the autograd information of the input
        Tensor from being overwritten.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class Exp(paddle.autograd.PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         ctx.mark_not_inplace(x)
                ...         return x
                ...
                ...     @staticmethod
                ...     def backward(ctx, grad_output):
                ...         out = grad_output.exp()
                ...         return out

                >>> paddle.seed(2023)
                >>> x = paddle.randn((1, 1))
                >>> x.stop_gradient = False
                >>> attn_layers = []
                >>> for idx in range(0, 2):
                ...     attn_layers.append(Exp())

                >>> for step in range(0, 2):
                ...     a = x
                ...     for j in range(0,2):
                ...         a = attn_layers[j].apply(x)
                ...     a.backward()
        N)Znot_inplace_tensorsr   argsr   r   r   mark_not_inplace}   s   
'zPyLayerContext.mark_not_inplacec                 G   r   )a  
        Marks outputs as non-differentiable.
        This should be called at most once, only from inside the `forward` method,
        and all arguments should be tensor outputs.

        This will mark outputs as not requiring gradients, increasing the
        efficiency of backward computation. You still need to accept a gradient
        for each output in `backward`, but it will always be a zero tensor
        with the same shape as the corresponding output.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer
                >>> import numpy as np

                >>> class Tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         a = x + x
                ...         b = x + x + x
                ...         ctx.mark_non_differentiable(a)
                ...         return a, b
                ...
                ...     @staticmethod
                ...     def backward(ctx, grad_a, grad_b):
                ...         assert np.equal(grad_a.numpy(), paddle.zeros([1]).numpy())
                ...         assert np.equal(grad_b.numpy(), paddle.ones([1], dtype="float64").numpy())
                ...         return grad_b

                >>> x = paddle.ones([1], dtype="float64")
                >>> x.stop_gradient = False
                >>> a, b = Tanh.apply(x)
                >>> b.sum().backward()
        N)Znon_differentiabler   r   r   r   mark_non_differentiable   s   
&z&PyLayerContext.mark_non_differentiablevaluec                 C   r   )a  
        Sets whether to materialize output grad tensors. Default is True.

        This should be called only from inside the `forward` method.

        If True, undefined output grad tensors will be expanded to tensors full
        of zeros prior to calling the `backward` method.

        If False, undefined output grad tensors will be None.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer
                >>> import numpy as np

                >>> class Tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         return x+x+x, x+x
                ...
                ...     @staticmethod
                ...     def backward(ctx, grad, grad2):
                ...         assert np.equal(grad2.numpy(), paddle.zeros([1]).numpy())
                ...         return grad

                >>> class Tanh2(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         ctx.set_materialize_grads(False)
                ...         return x+x+x, x+x
                ...
                ...     @staticmethod
                ...     def backward(ctx, grad, grad2):
                ...         assert grad2 is None
                ...         return grad

                >>> x = paddle.ones([1], dtype="float64")
                >>> x.stop_gradient = False
                >>> Tanh.apply(x)[0].backward()

                >>> x2 = paddle.ones([1], dtype="float64")
                >>> x2.stop_gradient = False
                >>> Tanh2.apply(x2)[0].backward()
        N)Zmaterialize_grads)r   r   r   r   r   set_materialize_grads   s   
/z$PyLayerContext.set_materialize_gradsN)
r   r   r   __doc__r   r   r   r   boolr   r   r   r   r   r      s    & )(r   c                   @   s   e Zd Zdd ZdS )PyLayerBackwardc                 G   s   | j j| g|R  S r   )_forward_clsbackwardr   r   r   r   r$     s   zPyLayerBackward.backwardN)r   r   r   r$   r   r   r   r   r"      s    r"   c                       s   e Zd Z fddZ  ZS )PyLayerMetac                    s(   t |d tfd| i| _t |||S )NZ	_backwardr#   )r   r"   Z_backward_functionsuper__init__)r   r   r	   r   	__class__r   r   r'     s   zPyLayerMeta.__init__)r   r   r   r'   __classcell__r   r   r(   r   r%     s    r%   c                   @   s(   e Zd ZdZedd Zedd ZdS )PyLayera	  
    Paddle implements Python custom operators on the PaddlePaddle framework by creating a subclass of
    ``PyLayer``, which must comply with the following rules:

    1. The subclass must contain static ``forward`` and ``backward`` functions, with the first argument being
    :ref:`api_paddle_autograd_PyLayerContext`. If a returned value in ``backward`` corresponds to a ``Tensor`` that
    requires gradients in ``forward``, the returned value must be a ``Tensor``.

    2. Except for the first argument, other arguments of ``backward`` are gradients of the output ``Tensors``
    of ``forward``. Therefore, the number of input ``Tensor`` in ``backward`` must be the same as the number
    of output ``Tensor`` in ``forward``. If you need to use input ``Tensor`` from ``forward`` in ``backward``,
    you can save these ``Tensors`` by passing them to :ref:`api_paddle_autograd_PyLayerContext`'s
    ``save_for_backward`` method and use them in ``backward`` later.

    3. The output of ``backward`` can be ``Tensor`` or ``list/tuple(Tensor)``, which are gradients of the
    output ``Tensor`` of ``forward``. Therefore, the number of output ``Tensor`` in ``backward`` is the same
    as the number of input ``Tensor`` in ``forward``.

    After building the custom operator, apply it by running the ``apply`` method.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.autograd import PyLayer

            >>> class cus_tanh(PyLayer):
            ...     @staticmethod
            ...     def forward(ctx, x):
            ...         y = paddle.tanh(x)
            ...         # Pass tensors to backward.
            ...         ctx.save_for_backward(y)
            ...         return y
            ...
            ...     @staticmethod
            ...     def backward(ctx, dy):
            ...         # Get the tensors passed by forward.
            ...         y, = ctx.saved_tensor()
            ...         grad = dy * (1 - paddle.square(y))
            ...         return grad

            >>> paddle.seed(2023)
            >>> data = paddle.randn([2, 3], dtype="float64")
            >>> data.stop_gradient = False
            >>> z = cus_tanh.apply(data)
            >>> z.mean().backward()

            >>> print(data.grad)
            Tensor(shape=[2, 3], dtype=float64, place=Place(cpu), stop_gradient=True,
            [[0.16604150, 0.05858341, 0.14051214],
             [0.15677770, 0.01564609, 0.02991660]])
    """

    @staticmethod
    def forward(ctx, *args, **kwargs):
        """
        It is to be overloaded by subclasses. It must accept an object of :ref:`api_paddle_autograd_PyLayerContext` as
        the first argument, followed by any number of arguments (tensors or other types).
        `None` cannot be included in the returned result.

        Args:
            *args(tuple): input of PyLayer.
            **kwargs(dict): input of PyLayer.

        Returns:
            tensors or other types : output of PyLayer.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer

                >>> class cus_tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         return grad
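
            The layer defined above would then be invoked through ``apply``; a short
            usage sketch:

            .. code-block:: python

                >>> data = paddle.randn([2, 3], dtype="float64")
                >>> data.stop_gradient = False
                >>> z = cus_tanh.apply(data)
                >>> z.mean().backward()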
        """
        raise NotImplementedError(
            "You must implement the forward function for PyLayer."
        )

    @staticmethod
    def backward(ctx, *args):
        """
        This is a function to calculate the gradient. It is to be overloaded by subclasses.
        It must accept an object of :ref:`api_paddle_autograd_PyLayerContext` as the first
        argument, and the remaining arguments are the gradients of forward's output tensors.
        The output tensors of backward are the gradients of forward's input tensors.

        Args:
            *args(tuple): The gradient of forward's output tensor(s).
            **kwargs(dict): The gradient of forward's output tensor(s).

        Returns:
            Tensor or list of Tensors: The gradient of forward's input tensor(s).

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer

                >>> class cus_tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         return grad
        """
        raise NotImplementedError(
            "You must implement the backward function for PyLayer."
        )


def once_differentiable(backward):
    # Run the wrapped ``backward`` under ``no_grad`` so that no autograd graph
    # is recorded for it; its outputs can be differentiated only once.
    def wrapper(ctx, *args):
        with paddle.base.dygraph.no_grad():
            outputs = backward(ctx, *args)
        return outputs

    return wrapper
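

# A usage sketch (illustrative; ``Square`` below is a hypothetical layer, not
# part of this module): ``once_differentiable`` is meant to decorate a custom
# ``backward`` when higher-order gradients through it are not needed.
#
#     class Square(PyLayer):
#         @staticmethod
#         def forward(ctx, x):
#             ctx.save_for_backward(x)
#             return x * x
#
#         @staticmethod
#         @once_differentiable
#         def backward(ctx, dy):
#             (x,) = ctx.saved_tensor()
#             return 2.0 * x * dy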