import warnings

import paddle
from paddle import _C_ops, in_dynamic_mode
from paddle.base.layer_helper import LayerHelper
from paddle.framework import no_grad
from paddle.nn.layer.norm import _BatchNormBase


class BatchNorm(paddle.nn.BatchNorm1D):
    r"""
    Applies Batch Normalization over a SparseCooTensor as described in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_.

    When use_global_stats = False, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of one mini-batch.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    When use_global_stats = True, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance),
    usually obtained from a pre-trained model. They are updated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\
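
    For instance, one update step of the running mean is a plain blend of the old
    running value and the new batch mean (a sketch with illustrative numbers, not
    part of the layer's API):

    .. code-block:: python

        >>> momentum = 0.75  # illustrative; the layer's default momentum is 0.9
        >>> moving_mean, batch_mean = 2.0, 4.0
        >>> moving_mean = moving_mean * momentum + batch_mean * (1.0 - momentum)
        >>> print(moving_mean)
        2.5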

    The normalization function formula is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter
    - :math:`\beta` : trainable shift parameter

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
            If the Initializer of the weight_attr is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of batch_norm.
            If it is set to None or one attribute of ParamAttr, batch_norm
            will create ParamAttr as bias_attr. If it is set to False, the bias is not learnable.
            If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
        data_format(str, optional): Specify the input data format, may be "NDHWC" or "NHWC". Default "NDHWC".
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch, if set to True, use the global statistics, if set to None, use global statistics in the test phase and use the statistics of one mini-batch in the training phase. Default: None.
        name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - x: A SparseCooTensor with layout = 'NDHWC' or 'NHWC'.
        - output: SparseCooTensor with same shape as input x.

    Returns:
        None.


    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.seed(123)
            >>> channels = 3
            >>> x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
            >>> dense_x = paddle.to_tensor(x_data)
            >>> sparse_x = dense_x.to_sparse_coo(4)
            >>> batch_norm = paddle.sparse.nn.BatchNorm(channels)
            >>> batch_norm_out = batch_norm(sparse_x)
            >>> print(batch_norm_out.shape)
            [1, 6, 6, 6, 3]
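
            >>> # a sketch of evaluation-mode usage, continuing the example above:
            >>> # after eval(), the layer normalizes with the tracked running
            >>> # statistics (moving_mean/moving_variance) instead of batch statistics
            >>> batch_norm.eval()
            >>> eval_out = batch_norm(sparse_x)
            >>> print(eval_out.shape)
            [1, 6, 6, 6, 3]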
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NDHWC',
        use_global_stats=None,
        name=None,
    ):
        super().__init__(
            num_features,
            momentum=momentum,
            epsilon=epsilon,
            weight_attr=weight_attr,
            bias_attr=bias_attr,
            data_format=data_format,
            use_global_stats=use_global_stats,
            name=name,
        )

    def _check_data_format(self, input):
        if input not in ["NDHWC", "NHWC"]:
            raise ValueError(
                'sparse BatchNorm only support layout of "NDHWC" and "NHWC"'
            )

    def forward(self, input):
        self._check_data_format(self._data_format)

        if self.training:
            warnings.warn(
                "When training, we now always track global mean and variance."
            )

        if self._use_global_stats is None:
            self._use_global_stats = not self.training
            trainable_statistics = False
        else:
            trainable_statistics = not self._use_global_stats

        data_format = 'NCHW' if self._data_format[1] == 'C' else 'NHWC'

        if in_dynamic_mode():
            batch_norm_out, _, _, _, _, _ = _C_ops.sparse_batch_norm_(
                input,
                self._mean,
                self._variance,
                self.weight,
                self.bias,
                not self.training,
                self._momentum,
                self._epsilon,
                data_format,
                self._use_global_stats,
                trainable_statistics,
            )
            return batch_norm_out
        else:
            inputs = {
                'x': input,
                'scale': self.weight,
                'bias': self.bias,
                'mean': self._mean,
                'variance': self._variance,
            }
            attrs = {
                'momentum': self._momentum,
                'epsilon': self._epsilon,
                'data_layout': data_format,
                'is_test': not self.training,
                'use_global_stats': self._use_global_stats,
                'trainable_statistics': trainable_statistics,
                'fuse_with_relu': False,
            }
            op_type = 'sparse_batch_norm'
            helper = LayerHelper(op_type)
            dtype = input.dtype
            mean_out = helper.create_variable_for_type_inference(
                dtype=dtype, stop_gradient=True
            )
            variance_out = helper.create_variable_for_type_inference(
                dtype=dtype, stop_gradient=True
            )
            saved_mean = helper.create_variable_for_type_inference(
                dtype=dtype, stop_gradient=True
            )
            saved_variance = helper.create_variable_for_type_inference(
                dtype=dtype, stop_gradient=True
            )
            reserve_space = helper.create_variable_for_type_inference(
                dtype=dtype, stop_gradient=True
            )
            out = helper.create_sparse_variable_for_type_inference(dtype)
            outputs = {
                "out": out,
                "mean_out": mean_out,
                "variance_out": variance_out,
                "saved_mean": saved_mean,
                "saved_variance": saved_variance,
                "reserve_space": reserve_space,
            }
            helper.append_op(
                type=op_type, inputs=inputs, outputs=outputs, attrs=attrs
            )
            return out


class SyncBatchNorm(paddle.nn.SyncBatchNorm):
    r"""
    This interface is used to construct a callable object of the ``SyncBatchNorm`` class.
    It implements the function of the Cross-GPU Synchronized Batch Normalization Layer, and can
    be used as a normalizer function for other operations, such as conv2d and fully connected
    operations.
    The data is normalized by the mean and variance of the channel based on the whole
    mini-batch, which includes data from all GPUs.
    Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
    for more details.

    When the model is in training mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are the statistics of the whole mini-batch data across all GPUs.
    Calculated as follows:

    ..  math::

        \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\
        \ mini-batch\ mean \\
        \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \
        \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\

    - :math:`x` : whole mini-batch data in all gpus
    - :math:`m` : the size of the whole mini-batch data

    When the model is in evaluation mode, the :math:`\mu_{\beta}`
    and :math:`\sigma_{\beta}^{2}` are global statistics (moving_mean and moving_variance,
    which are usually obtained from a pre-trained model). Global statistics are calculated as follows:

    .. math::
        moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global \ mean \\
        moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global \ variance \\

    The formula of normalization is as follows:

    ..  math::

        \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\
        \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
        y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    - :math:`\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\gamma` : trainable scale parameter vector
    - :math:`\beta` : trainable shift parameter vector

    Note:
        If you want to use a container to pack your model and the packed model has
        ``SyncBatchNorm`` used in the evaluation phase, please use ``nn.LayerList`` or
        ``nn.Sequential`` instead of ``list`` to pack the model, as sketched below.
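
        For example, a minimal sketch (``Conv3D`` and the channel sizes here are
        illustrative, not prescribed):

        .. code-block:: python

            >>> import paddle

            >>> # sublayers added through nn.Sequential are registered with the
            >>> # parent layer, so eval() and conversion utilities can find them;
            >>> # layers kept in a plain Python list would be invisible to them
            >>> model = paddle.nn.Sequential(
            ...     paddle.sparse.nn.Conv3D(3, 5, 3),
            ...     paddle.sparse.nn.SyncBatchNorm(5),
            ... )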

    Parameters:
        num_features(int): Indicate the number of channels of the input ``Tensor``.
        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
        momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of this layer. If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as weight_attr. If the Initializer of the weight_attr
             is not set, the parameter is initialized with Xavier. If it is set to False,
             this layer will not have a trainable scale parameter. Default: None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of this layer.
             If it is set to None or one attribute of ParamAttr, this layer
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized zero. If it is set to False, this layer will not
             have trainable bias parameter. Default: None.
        data_format(str, optional): Specify the input data format, may be "NCHW". Default "NCHW".
        name(str, optional): Name for the SyncBatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.

    Shapes:
        input: Tensor whose dimension is from 2 to 5.

        output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env:GPU)
            >>> import paddle
            >>> import paddle.sparse.nn as nn
            >>> paddle.device.set_device('gpu')

            >>> x = paddle.to_tensor([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]], dtype='float32')
            >>> x = x.to_sparse_coo(len(x.shape)-1)

            >>> if paddle.is_compiled_with_cuda():
            ...     sync_batch_norm = nn.SyncBatchNorm(2)
            ...     hidden1 = sync_batch_norm(x)
            ...     print(hidden1)
            Tensor(shape=[1, 2, 2, 2], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=False,
                   indices=[[0, 0, 0, 0],
                            [0, 0, 1, 1],
                            [0, 1, 0, 1]],
                   values=[[-0.40730840, -0.13725480],
                            [-0.40730840, -1.20299828],
                            [ 1.69877410, -0.23414057],
                            [-0.88415730,  1.57439375]])
    """

    def __init__(
        self,
        num_features,
        momentum=0.9,
        epsilon=1e-05,
        weight_attr=None,
        bias_attr=None,
        data_format='NCHW',
        name=None,
    ):
        super().__init__(
            num_features,
            momentum,
            epsilon,
            weight_attr,
            bias_attr,
            data_format,
            name,
        )

    def forward(self, x):
        self._check_data_format()
        sync_batch_norm_out, _, _, _, _, _ = _C_ops.sparse_sync_batch_norm_(
            x,
            self._mean,
            self._variance,
            self.weight,
            self.bias,
            not self.training,
            self._momentum,
            self._epsilon,
            self._data_format,
            False,
            False,
        )
        return sync_batch_norm_out

    @classmethod
    def convert_sync_batchnorm(cls, layer):
        r"""
        Helper function to convert :class:`paddle.sparse.nn.BatchNorm` layers in the model to :class:`paddle.sparse.nn.SyncBatchNorm` layers.

        Parameters:
            layer(paddle.nn.Layer): model containing one or more `BatchNorm` layers.

        Returns:
            The original model with its `BatchNorm` layers converted to `SyncBatchNorm` layers in place.

        Examples:

            .. code-block:: python

                >>> import paddle
                >>> import paddle.sparse.nn as nn

                >>> model = paddle.nn.Sequential(nn.Conv3D(3, 5, 3), nn.BatchNorm(5))
                >>> sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
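
                >>> # a sketch of one way to check the result: the BatchNorm
                >>> # sublayer should now be a (sparse) SyncBatchNorm
                >>> print(isinstance(sync_model[1], nn.SyncBatchNorm))
                True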
        NZ_syncF)
isinstancer   Z_weight_attrboolr   Z
_bias_attrr   rC   Z_num_featuresr9   r:   r2   _namepaddlennr   r8   r$   r6   r7   Znamed_childrenZadd_sublayerconvert_sync_batchnorm)clslayerZlayer_outputr   Zsublayerr   r   r   rI   V  sZ   









z$SyncBatchNorm.convert_sync_batchnorm)r   r	   NNr!   N)	r>   r?   r@   rA   r   r=   classmethodrI   rB   r   r   r   r   rC      s    drC   )r4   rG   r   r   Zpaddle.base.layer_helperr   Zpaddle.frameworkr   Zpaddle.nn.layer.normr   rH   ZBatchNorm1Dr   rC   r   r   r   r   <module>   s    8