o
    "j                     @   s  d dl Z d dl mZmZ d dlmZ d dlmZmZ d dlm	Z	 d dl
mZ d dlmZmZ g Zdd	 Z							
	
									dddZ				
				dddZ											
	
								dddZ										 						dddZdS )    N)_C_ops_legacy_C_ops)core)check_dtypecheck_variable_and_dtype)default_main_program)LayerHelper)in_dynamic_modein_dynamic_or_pir_modec                 C   s2   t | ttfstd| dk s| dkrtdd S )Nz(dropout_rate argument should be a numberr      z,dropout_rate argument should between 0 and 1)
isinstancefloatint	TypeError
ValueError)dropout_rate r   p/var/www/html/Deteccion_Ine/venv/lib/python3.10/site-packages/paddle/incubate/nn/functional/fused_transformer.py_verify_dropout_rate   s
   r         ?reluh㈵>FTupscale_in_trainc           #      C   sv  t |	 t |
 d}|dvrtd|dkrdn|}t rtj jdkr+tj j}t rtj	g | dd||||||||d|d|d	|d
|d|	d|
d| d|dud|dud|durx|ndd|dur|ndd|d|d|d|R  \}}}}}}}}}}}|S | j
}t| dg dd t|dg dd t	| dd|||||||||||||	|
||| |du|du|dur|nd|dur|nd||\}}}}}}}}}}}|S td}| j
}t| dg dd t|dg dd || j
}|jddd}|jddd}|j| j
dd}|j| j
dd}|j| j
dd}|j| j
dd}|j| j
dd}|j| j
dd} |j| j
dd}!|j| j
dd}"|du sl|dkrw|jjdkrw|jj}|jd| ||||||||d	||||||||| |!|"d|	|
||||| |du|du|dur|nd|dur|nd||||dd  |S )!a  
    This is a fusion operator to compute feed forward layer in transformer model architecture.
    This operator only supports running on GPU. The function of the operator is consistent with
    the following pseudo code:

    .. code-block:: text

        >>> residual = x
        >>> if pre_layer_norm:
        ...     out = layer_norm1(x)
        ...  else:
        ...     out = x
        >>> out = linear2(dropout1(activation(linear1(src))))
        >>> if add_residual:
        ...     out = residual + dropout2(out)
        ... else:
        ...     out = dropout2(out)
        >>> if not pre_layer_norm:
        ...     out = layer_norm2(out)


    Args:
        x (Tensor): the input tensor could be 3-D tensor, the input data type could be float16, float32 or float64, the shape is`[batch\_size, sequence\_length, d_model]`.
        linear1_weight (Tensor): The weight of first linear, the data type is same as `x`, the shape is `[d\_model, dim\_feedforward]`.
        linear2_weight (Tensor): The weight of second linear, the data type is same as `x`, the shape is `[dim\_feedforward, d\_model]`.
        linear1_bias (Tensor, optional): The bias of first linear, the data type is same as `x`, the shape is `[dim_feedforward]`. Default None.
        linear2_bias (Tensor, optional): The bias of second linear, the data type is same as `x`, the shape is `[d_model]`. Default None.
        ln1_scale (Tensor, optional): the weight of first layer_norm, the data type is float32 or float64, the shape is same as `x`. Default None.
        ln1_bias (Tensor, optional): The bias of first layer_norm, the data type is float32 or float64, the shape is `[d\_model]`. Default None.
        ln2_scale (Tensor, optional): The weight of second layer_norm, the data type is float32 or float64, the shape is same as `x`. Default None.
        ln2_bias (Tensor, optional): The bias of second layer_norm, the data type is float32 or float64, the shape is `[d\_model]`. Default None.
        dropout1_rate (float, optional): The first dropout probability of setting units to zero. Default 0.5.
        dropout2_rate (float, optional): The second dropout probability of setting units to zero. Default 0.5.
        activation (str, optional): The activation. Default "relu".
        ln1_epsilon (float, optional): Small float of first layer_norm added to denominator to avoid dividing by zero. Default is 1e-5.
        ln2_epsilon (float, optional): Small float of second layer_norm added to denominator to avoid dividing by zero. Default is 1e-5.
        pre_layer_norm (bool, optional): add layer_norm in the pre-processing stage or post-processing state.
        training (bool, optional): A flag indicating whether it is in train phrase or not. Default True.
        mode (str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        ring_id (int, optional): For distributed forward in tensor model parallel, only support NCCL. Default is -1, means not using tensor parallel.
        add_residual (bool, optional): Whether add residual at the end. Default is True.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output Tensor, the data type and shape is same as `x`.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env:GPU)
            >>> import paddle
            >>> paddle.device.set_device('gpu')
            >>> import paddle.incubate.nn.functional as F

            >>> x = paddle.randn(shape=(1, 8, 8), dtype="float32")
            >>> linear1_weight = paddle.randn(shape=(8, 8), dtype="float32")
            >>> linear2_weight = paddle.randn(shape=(8, 8), dtype="float32")
            >>> out = F.fused_feedforward(x, linear1_weight, linear2_weight)
            >>> print(out.shape)
            [1, 8, 8]
    Ndownscale_in_inferr   Bmode argument should be 'downscale_in_infer' or 'upscale_in_train'r   downgrade_in_inferr   pre_layer_normln1_epsilonln2_epsilon
act_methoddropout1_ratedropout2_rateis_testdropout1_fix_seeddropout2_fix_seeddropout1_seeddropout2_seeddropout1_implementationdropout2_implementationadd_residualring_idxfloat16float32Zfloat64fused_feedforwarddtypeZuint8T)stop_gradient)	XZLinear1WeightZLinear1BiasZLinear2WeightZLinear2BiasZLn1ScaleZLn1BiasLn2ScaleLn2Bias)OutZDropout1MaskZDropout2MaskZLn1MeanZLn1VarianceLn2MeanLn2VarianceZ
Linear1OutZLn1OutZDropout1OutZDropout2Out)r"   r#   r!   r   r   r    r$   r%   r&   r'   r(   r)   r*   r+   r,   typeinputsoutputsattrs)r   r   r
   paddlestaticr   random_seedr	   r   r1   r2   r   r   r   r   "create_variable_for_type_inferencemain_program	append_op)#r-   Zlinear1_weightZlinear2_weightZlinear1_biasZlinear2_biasZ	ln1_scaleZln1_biasZ	ln2_scaleZln2_biasr"   r#   
activationr   r    r   trainingmoder,   r+   nameseedout_r2   helperZdropout1_maskZdropout2_maskZln1_meanZln1_varianceZln2_meanZln2_varianceZlinear1_outZln1_outZdropout1_outZdropout2_outr   r   r   r1   $   s  ]	
 !"#$%&'()T",r1   c
                 C   sD  d}
|dvr
t d|dkrdn|}|dur4t|jdks!J d| jt| jd  |jd ks4J d	|durVt|jdksCJ d
| jt| jd  |jd ksVJ dt rt jdkrct j}
t| ||||d|d|d| d|
dud|
dur||
ndd|\}}}}}|S t	d!i t	 }| j
}t| dg dd t|dg dd i }| g|d< |g|d< |dur|g|d< |r|g|d< |r|g|d< |
du s|
dkr|jjdkr|jj}
||| |
du|
dur|
nd|d}|jtjjjdd}|j|dd}|j|dd}|j|d}|j|d}|jd||||||d|d  |S )"a
  

    The fused_bias_dropout_residual_layer_norm operator. The pseudo code is as follows:

    .. code-block:: text

        >>> y = layer_norm(residual + dropout(bias + x))

    Parameters:
        x (Tensor): The input tensor. The shape is `[*, embed\_dim]`.
        residual (Tensor): The residual tensor. The shape is same as x.
        bias (Tensor, optional): The bias of linear. The shape is `[embed_dim]`. Default None.
        ln_scale (Tensor, optional): The weight tensor of layernorm. The shape is `[embed_dim]`. Default None.
        ln_bias (Tensor, optional): The bias tensor of layernorm. The shape is `[embed_dim]`. Default None.
        dropout_rate (float, optional): The dropout probability used on attention
            weights to drop some attention targets for the dropout after attention.
            0 for no dropout. Default 0.5.
        ln_epsilon (float, optional): Small float value added to denominator of layer_norm
            to avoid dividing by zero. Default is 1e-5.
        training (bool, optional): A flag indicating whether it is in train phrase or not. Default True.
        mode (str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, The output Tensor, the data type and shape is same as `x`.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env:GPU)
            >>> import paddle
            >>> paddle.device.set_device('gpu')
            >>> import paddle.incubate.nn.functional as F

            >>> # input: [batch_size, seq_len, embed_dim]
            >>> x = paddle.rand(shape=(2, 4, 128), dtype="float32")
            >>> # residual: [batch_size, seq_len, embed_dim]
            >>> residual = paddle.rand(shape=(2, 4, 128), dtype="float32")
            >>> # linear bias: [embed_dim]
            >>> bias = paddle.rand(shape=[128], dtype="float32")
            >>> # output: [batch_size, seq_len, embed_dim]
            >>> output = F.fused_bias_dropout_residual_layer_norm(
            ...     x, residual, bias)
            >>> print(output.shape)
            [2, 4, 128]

    Nr   r   r   r   r   z.The dims of the shape of ln_scale should be 1.r   z4The dim of ln_scale must equal to the last dim of x.z-The dims of the shape of ln_bias should be 1.z3The dim of ln_bias must equal to the last dim of x.r   
ln_epsilonr$   dropout_fix_seeddropout_seeddropout_implementation&fused_bias_dropout_residual_layer_normr-   r.   r2   r4   ZResidualZBiasLnScaleLnBias)rM   r   r$   rN   rO   rP   Tr2   r3   r2   )BiasDropoutResidualOutDropoutMaskOutLnMean
LnVarianceYr:   )rQ   )r   lenshaper	   r   rA   r   rQ   r   localsr2   r   r   rC   rB   r   VarDescVarTypeUINT8rD   )r-   ZresidualZbiasln_scaleln_biasr   rM   rF   rG   rH   rI   rK   	final_outrL   r2   r<   r>   dropout_mask_outln_mean_outln_variance_outbias_dropout_residual_outr   r   r   rQ   C  s   E  




	
rQ   c           1      C   sd  d}|dvr
t d|dkrdn|}| jdkr t d| j dt rtj jd	kr2tj j}|spt|jd
ks?J d|jd	 dksJJ d|jd | jd ksXJ d|dkro|jd |jd  |jd ksoJ dnR|d	ksxJ dt|jdksJ d|dkr|jd d|jd	  ksJ d|jd	 | jd ksJ d|	durt|	jdksJ d|	jd	 |jd ksJ dt	 rJt
jg | ||||	||||
||d|d|d|d|d|d|d|d| d |dud!|dud"|dur|nd	d#|dur |nd	d$|d%|d&|d'|R  \}}}}}}}}}}}}}}}}}}}}nFt| ||||	||||
|||||||| |du|durh|nd	|||du|duru|nd	||||\}}}}}}}}}}}}}}}}}}}}|dur||fS |S tdOi t }| j}t| d)g d*d+ t|d,g d*d( i }| g|d-< |r|g|d.< |r|g|d/< |g|d0< |	dur|	g|d1< ||d2< |g|d3< |
dur|
g|d4< |r|g|d5< |r|g|d6< |r	|g|d7< |du s|d	kr|jjd	kr|jj}i d|d|d|d|d|d| d |dud!|dud"|durD|nd	d#|durN|nd	d$|d%|d&|d'|d|d|}|j|d8d9}|j|d8d9} |j|d:}!|j|d:}"|j|d:}#|j|d:}$|j|d:}%|j|d:}&|j|d:}'|jtjjjd8d9}(|j|d:})|j|d:}*|j|d:}+|j|d:},|jtjjjd8d9}-|j|d8d9}.|j|d8d9}/|j|d:}0|j|d:}|j|d:}|jd;|i d<|d=| d>|!d?|"d@|#dA|$dB|%dC|&dD|'dE|(dF|)dG|*dH|+dI|,dJ|-dK|.dL|/|0||dM|dN |r0||fS |S )Pa  
    Attention mapps queries and a set of key-value pairs to outputs, and
    Multi-Head Attention performs multiple parallel attention to jointly attending
    to information from different representation subspaces. This API only
    support self_attention. The pseudo code is as follows:

    .. code-block:: text

        >>> residual = x
        >>> if pre_layer_norm:
        ...     out = layer_norm(x)
        ... else:
        ...     out = x
        >>> # compute q, k, v
        >>> out = matmul(out, qkv_weight) + qkv_bias
        >>> out = transpose(out, perm=[2, 0, 3, 1, 4])
        >>> # extract q, k and v from out
        >>> q = out[0:1,::] * (head_dim ** -0.5)
        >>> k = out[1:2,::]
        >>> v = out[2:3,::]
        >>> out = matmul(q, k, transpose_y=True)
        >>> out = out + attn_mask
        >>> out = softmax(out)
        >>> out = dropout(out)
        >>> out = matmul(out, v)
        >>> # combine heads
        >>> out = transpose(out, perm=[0, 2, 1, 3])
        >>> # project to output
        >>> out = linear(out)
        >>> if add_residual:
        ...     out = residual + dropout(out)
        ... else:
        ...     out = dropout(out)
        >>> if not pre_layer_norm:
        ...     out = layer_norm(out)


    Parameters:
        x (Tensor): The input tensor of fused_multi_head_attention. The shape is
            `[batch\_size, sequence\_len, embed\_dim]`.
        qkv_weight (Tensor): The qkv weight tensor. If `transpose_qkv_wb` is False, the shape is `[3, num_head, dim_head, dim_embed]`. Otherwise, the shape is `[dim_embed, 3 * dim_embed]`.
        linear_weight (Tensor): The linear weight tensor. The shape is `[embed_dim, embed_dim]`.
        pre_layer_norm (bool, optional): whether it is pre_layer_norm (True) or post_layer_norm architecture
                                        (False). Default False.
        pre_ln_scale (Tensor, optional): The weight tensor of pre layernorm. Default None.
        pre_ln_bias (Tensor, optional): The bias tensor of pre layernorm. Default None.
        ln_scale (Tensor, optional): The weight tensor of layernorm. Default None.
        ln_bias (Tensor, optional): The bias tensor of layernorm. Default None.
        pre_ln_epsilon (float, optional): Small float value added to denominator of the pre layer_norm
            to avoid dividing by zero. Default is 1e-5.
        qkv_bias (Tensor, optional): The bias of qkv computation. If `transpose_qkv_wb` is False, the shape is `[3, num_head, dim_head]`. Otherwise, the shape is `[3 * dim_embed]`.
            Default None.
        linear_bias (Tensor, optional): The bias of linear. The shape is `[embed_dim]`. Default None.
        cache_kv (Tensor, optional): For generation model, cache structure. The shape is `[2, bsz, num_head, seq_len, head_dim]`. Default None.
        attn_mask (Tensor, optional):  A tensor used in multi-head attention to prevents attention to
            some unwanted positions, usually the paddings or the subsequent positions. It is a tensor
            with shape broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`. When the
            data type is bool, the unwanted positions have `False` values and the others have `True` values.
            When the data type is int, the unwanted positions have 0 values and the others have 1 values.
            When the data type is float, the unwanted positions have `-INF` values and the others have 0 values.
            It can be None when nothing wanted or needed to be prevented attention to. Default None.
        dropout_rate (float, optional): The dropout probability used on attention
            weights to drop some attention targets for the dropout after attention.
            0 for no dropout. Default 0.5.
        attn_dropout_rate (float, optional): The dropout probability used on attention
            weights to drop some attention targets for the dropout in attention.
            0 for no dropout. Default 0.5.
        ln_epsilon (float, optional): Small float value added to denominator of layer_norm
            to avoid dividing by zero. Default is 1e-5.
        training (bool, optional): A flag indicating whether it is in train phrase or not. Default True.
        mode (str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        ring_id (int, optional): For distributed forward in mp, only support NCCL and forward. Default is -1, means not using mp
        add_residual (bool, optional): Whether add residual at the end. Default is True.
        num_heads (int, optional): If enable transpose_qkv_wb, should provide the num_heads. Default is -1, means not transpose qkv wb.
        transpose_qkv_wb (bool, optional): Whether transpose the qkv_weight and qkv_bias in the op. Only support GPU for now. Default is false, means not transpose qkv wb.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: The output Tensor, the data type and shape is same as `x`.

    Examples:

        .. code-block:: python

            >>> # doctest: +REQUIRES(env:GPU)
            >>> import paddle
            >>> paddle.device.set_device('gpu')
            >>> import paddle.incubate.nn.functional as F

            >>> # input: [batch_size, seq_len, embed_dim]
            >>> x = paddle.rand(shape=(2, 4, 128), dtype="float32")
            >>> # qkv_weight: [3, num_head, head_dim, embed_dim]
            >>> qkv_weight = paddle.rand(shape=(3, 4, 32, 128), dtype="float32")
            >>> # qkv_bias: [3, num_head, head_dim]
            >>> qkv_bias = paddle.rand(shape=(3, 4, 32), dtype="float32")
            >>> # linear_weight: [embed_dim, embed_dim]
            >>> linear_weight = paddle.rand(shape=(128, 128), dtype="float32")
            >>> # linear_bias: [embed_dim]
            >>> linear_bias = paddle.rand(shape=[128], dtype="float32")
            >>> # self attention mask: [batch_size, num_heads, seq_len, seq_len]
            >>> attn_mask = paddle.rand(shape=(2, 4, 4, 4), dtype="float32")

            >>> # output: [batch_size, seq_len, embed_dim]
            >>> output = F.fused_multi_head_attention(
            ...     x, qkv_weight, linear_weight, False,
            ...     None, None, None, None, 1e-5, qkv_bias,
            ...     linear_bias, None, attn_mask)
            >>> print(output.shape)
            [2, 4, 128]
    Nr   r   r   r      z,The rank of the x should be 3, but received .r      z0The dims of the shape of qkv_weight should be 4.zEThe shape of qkv_weight should be [3, num_head, head_dim, embed_dim].   zOThe 3rd dim of qkv_weight and 2nd dim of x should be the same, i.e., embed_dim.r   r   z)embed_dim must be divisible by num_heads.zRWhen enable transpose_qkv_wb, the num_heads should be provided and greater than 0.zkWhen enable transpose_qkv_wb, the dims of the shape of qkv_weight should be 2 when enable transpose_qkv_wb.zxWhen enable transpose_qkv_wb, the shape of qkv_weight should be [embed_dim, 3 * embed_dim] when enable transpose_qkv_wb.zmWhen enable transpose_qkv_wb, the 1st dim of qkv_weight and 2nd dim of x should be the same, i.e., embed_dim.zLWhen enable transpose_qkv_wb, the dims of the shape of qkv_bias should be 1.ztWhen enable transpose_qkv_wb, the 1st dim of qkv_bias and 2nd dim of qkv_weight should be the same, i.e., embed_dim.	num_headstranspose_qkv_wbr   epsilonr   attn_dropout_raterM   r$   Zattn_dropout_fix_seedrN   Zattn_dropout_seedrO   Zattn_dropout_implementationrP   r+   r,   fused_multi_head_attentionr-   r.   Zfused_multihead_attentionr2   r4   rR   rS   QKVWQKVBiasSrcMask
OutLinearWOutLinearBiasr5   r6   CacheKVTrT   rU   fused_attentionrX   rY   ZLnOutZQKVOutZ
QKVBiasOutZTransposeOut2ZQKOutZQKTVOutZ
SoftmaxOutZAttnDropoutMaskOutZAttnDropoutOutZ
SrcMaskOutZFMHAOutZOutLinearOutrW   r8   r9   )rV   rZ   
CacheKVOutr:   )rp   )r   ndimr
   r?   r@   r   rA   r[   r\   r	   r   rw   r   r   r]   r2   r   r   rC   rB   r   r^   r_   r`   rD   )1r-   Z
qkv_weightZlinear_weightr   Zpre_ln_scaleZpre_ln_biasra   rb   Zpre_ln_epsilonZqkv_biasZlinear_biasZcache_kv	attn_maskr   ro   rM   rF   rG   r,   r+   rl   rm   rH   rI   rK   cache_kv_outrc   rL   r2   r<   r>   Zpre_ln_mean_outZpre_ln_variance_outZ
pre_ln_outZqkv_outZqkv_bias_outZtranspose_outZqk_outZqktv_outZsoftmax_outZattn_dropout_mask_outZattn_dropout_outZattn_mask_outZfmha_outZout_linear_outrd   re   rf   rg   r   r   r   rp     s   

	
 !"#$%&'()*+D












"	


	
rp           geluc           $   
   C   sv  |dvrt d|dkrdn|}t rrtjg | |||||||||||||||	|
|||d|d|d|d|d	| d
|d|d|d|R  \}}|durp||fS |S td-i t }| j} t| dddgd t| dddgd i }!| g|!d< ||!d< ||!d< ||!d< |dur||!d< |durt	|t	|ksJ ||!d< |dur||!d< |dur||!d< |dkr||!d< ||!d< ||!d< ||!d < |dur||!d!< ||!d"< ||!d#< |	|!d$< |
dur|
|!d%< ||!d&< |dur	||!d'< ||||| ||||d(	}"i }#|j
| d)}||#d*< |r)||#d+< |jd|!|#|"d, |r9||fS |S ).a"  
    This is a fusion operator to compute multi transformer layers in transformer model architecture.
    This operator only supports running on GPU. The function of the transformer layer is consistent
    with the following pseudo code:

    .. code-block:: text

        >>> if pre_layer_norm:
        ...     out = layer_norm(x)
        ...     out = qkv_linear(out) + qkv_bias
        ... else:
        ...     out = qkv_linear(x) + qkv_bias
        >>> out = transpose(out, perm=[2, 0, 3, 1, 4])
        >>> # extract q, k and v from out.
        >>> q = out[0:1, ::]
        >>> k = out[1:2, ::]
        >>> v = out[2:3, ::]
        >>> out = q * k^t
        >>> out = attn_mask + out
        >>> out = softmax(out)
        >>> out = dropout(out)
        >>> out = out * v
        >>> out = transpose(out, perm=[0, 2, 1, 3])
        >>> out = linear(out)
        >>> if pre_layer_norm:
        ...     out = x + dropout(out + bias)
        ... else:
        ...     out = layer_norm(x + dropout(out + bias))

        >>> residual = out;
        >>> if pre_layer_norm:
        ...     out = ffn_layer_norm(out)
        >>> out = ffn1_linear(out)
        >>> out = dropout(activation(out + ffn1_bias))
        >>> out = ffn2_linear(out)
        >>> out = residual + dropout(out + ffn2_bias)
        >>> if not pre_layer_norm:
        ...     out = ffn_layer_norm(out)

    Args:
        x (Tensor): the input tensor could be 3-D tensor, the input data type could be float16 or float32, the shape is `[batch\_size, sequence\_length, d\_model]`.
        ln_scales (list(Tensor)|tuple(Tensor)): The weight tensors of attention layer_norm, the shape is `[d\_model]`.
        ln_biases (list(Tensor)|tuple(Tensor)): The bias tensors of attention layer_norm. the shape is `[d\_model]`.
        qkv_weights (list(Tensor)|tuple(Tensor)): The weight tensors of attention qkv computation. The shape is `[3, num\_head, dim\_head, d\_model]`.
        qkv_biases (list(Tensor)|tuple(Tensor)|None): The bias tensors of attention qkv computation. The shape is `[3, num\_head, dim\_head]`.
        linear_weights (list(Tensor)|tuple(Tensor)): The weight tensors of attention linear. The shape is `[num\_head * dim\_head, d\_model]`.
        linear_biases (list(Tensor)|tuple(Tensor)|None): The bias tensors of attention linear. The shape is `[d\_model]`.
        ffn_ln_scales (list(Tensor)|tuple(Tensor)): The weight tensors of feedforward layer_norm, the shape is `[d\_model]`
        ffn_ln_biases (list(Tensor)|tuple(Tensor)): The bias tensors of feedforward layer_norm, the shape is `[d\_model]`
        ffn1_weights (list(Tensor)|tuple(Tensor)): The weight tensors of feedforward first linear, the shape is `[d\_model, dim\_feedforward]`.
        ffn1_biases (list(Tensor)|tuple(Tensor)|None): The bias tensors of feedforward first linear, the shape is `[dim\_feedforward]`.
        ffn2_weights (list(Tensor)|tuple(Tensor)): The weight tensors of feedforward second linear, the shape is `[dim\_feedforward, d\_model]`.
        ffn2_biases (list(Tensor)|tuple(Tensor)|None): The bias tensors of feedforward second linear, the shape is `[d_model]`.
        pre_layer_norm (bool, optional): whether it is pre_layer_norm(True) or post_layer_norm(False). Default True.
        epsilon (float, optional): Small float value added to denominator of the layer_norm to avoid dividing by zero. Default is 1e-5.
        cache_kvs (list(Tensor)|tuple(Tensor), optional): The cache structure tensors for the generation model. The shape is `[2, bsz, num\_head, max\_seq\_len, head\_dim]`. Default None.
        pre_caches (list(Tensor)|tuple(Tensor), optional): The prefix caches for the generation model. The shape is `[2, bsz, num\_head, cache\_len, head\_dim]`. Default None.
        seq_lens (Tensor optional): The sequence lengths of this batch. The shape is `[bsz]`. Default None.
        rotary_embs (Tensor optional): The RoPE embs for rotary computation. The shape is `[2, bsz, 1, seq\_len, head\_dim]`. Default None.
        time_step (Tensor, optional): The time step tensor for the generation model. Which used in decode stage, to represent the time step, that is, the real seq_len of CacheKV. The shape is `[1]`, must be in CPUPlace. Default None.
        attn_mask (Tensor, optional):  A tensor used in multi-head attention to prevents attention to
            some unwanted positions, usually the paddings or the subsequent positions. It is a tensor
            with shape `[batch_size, 1, sequence_length, sequence_length]`. Default None.
        dropout_rate (float, optional): The dropout probability of setting units to zero. Default 0.0.
        rotary_emb_dims (int, optional): The rotary_emb_dims of rotary computation, and it is 0 when rotary_embs is None,
            1 when rotary_embs is not None and pos_extra_ids is None, 2 when rotary_embs and pos_extra_ids are both not None. Default 0.
        activation (str, optional): The activation. Default "gelu".
        training (bool, optional): A flag indicating whether it is in train phrase or not. Default False.
        mode (str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        trans_qkvw (bool, optional): Whether to transpose for weights of qkv.
            If true, the shape eights of qkv should be [3, num_head, dim_head, dim_embed].
            Otherwise the shape of weights of qkv should be [dim_embed, 3, num_head, dim_head]. Default True.
        ring_id (int, optional): For distributed forward in tensor model parallel, only support NCCL. Default is -1, means not using mp.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor|tuple: If `cache_kvs` is None, return a tensor that has
        the same shape and data type with `x`, representing the output
        of Transformer layers. If `cache_kvs` is not None, return the
        tuple (output, cache_kvs), which output is the output of
        Transformer layers, cache_kvs is inplace with input `cache_kvs`.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env:GPU)
            >>> import paddle
            >>> paddle.device.set_device('gpu')
            >>> import paddle.incubate.nn.functional as F

            >>> # input: [batch_size, seq_len, embed_dim]
            >>> x = paddle.rand(shape=(2, 4, 128), dtype="float32")

            >>> # ln_scale: [embed_dim], ln_bias: [embed_dim]
            >>> ln_scale = paddle.rand(shape=(128,), dtype="float32")
            >>> ln_bias = paddle.rand(shape=(128,), dtype="float32")

            >>> # qkv_weight: [3, num_head, head_dim, embed_dim], qkv_bias: [3, num_head, head_dim]
            >>> qkv_weight = paddle.rand(shape=(3, 4, 32, 128), dtype="float32")
            >>> qkv_bias = paddle.rand(shape=(3, 4, 32), dtype="float32")

            >>> # linear_weight: [embed_dim, embed_dim], linear_bias: [embed_dim]
            >>> linear_weight = paddle.rand(shape=(128, 128), dtype="float32")
            >>> linear_bias = paddle.rand(shape=(128,), dtype="float32")

            >>> # ffn_ln_scale: [embed_dim], ffn_ln_bias: [embed_dim]
            >>> ffn_ln_scale = paddle.rand(shape=(128,), dtype="float32")
            >>> ffn_ln_bias = paddle.rand(shape=(128,), dtype="float32")

            >>> # ffn1_weight: [embed_dim, 4*embed_dim], ffn1_bias: [4*embed_dim]
            >>> ffn1_weight = paddle.rand(shape=(128, 4*128), dtype="float32")
            >>> ffn1_bias = paddle.rand(shape=(4*128,), dtype="float32")

            >>> # ffn2_weight: [4*embed_dim, embed_dim], ffn2_bias: [embed_dim]
            >>> ffn2_weight = paddle.rand(shape=(4*128, 128), dtype="float32")
            >>> ffn2_bias = paddle.rand(shape=(128,), dtype="float32")

            >>> # self attention mask: [batch_size, 1, seq_len, seq_len]
            >>> attn_mask = paddle.rand(shape=(2, 1, 4, 4), dtype="float32")

            >>> # output: [batch_size, seq_len, embed_dim]
            >>> output = F.fused_multi_transformer(
            ...     x, [ln_scale], [ln_bias], [qkv_weight], [qkv_bias],
            ...     [linear_weight], [linear_bias], [ffn_ln_scale], [ffn_ln_bias],
            ...     [ffn1_weight], [ffn1_bias], [ffn2_weight], [ffn2_bias],
            ...     attn_mask=attn_mask)
            >>> print(output.shape)
            [2, 4, 128]
    r   r   r   r   r   rn   r   rotary_emb_dimsr$   rP   r!   
trans_qkvwr,   Nfused_multi_transformerr-   r/   r0   r2   r4   rR   rS   rq   rr   rv   ZTimeStepZ	PreCachesr   ZRotaryPosEmbZ
SeqLengthsrs   rt   ru   Z
FFNLnScaleZ	FFNLnBiasZ
FFN1WeightZFFN1BiasZ
FFN2WeightZFFN2Bias)	r   rn   r   r~   r$   rP   r!   r   r,   rU   r7   rx   r:   )r   )r   r	   r   r   r   r]   r2   r   r   r[   rB   rD   )$r-   Z	ln_scalesZ	ln_biasesZqkv_weightsZ
qkv_biasesZlinear_weightsZlinear_biasesZffn_ln_scalesZffn_ln_biasesZffn1_weightsZffn1_biasesZffn2_weightsZffn2_biasesr   rn   Z	cache_kvsZ
pre_cachesZseq_lensZrotary_embsZ	time_steprz   r   r~   rE   rF   rG   r   r,   rH   r{   rc   rL   r2   r<   r>   r=   r   r   r   r     s&   +	
 !"#$%&(

r   )NNNNNNr   r   r   r   r   FTr   r   TN)NNNr   r   Tr   N)FNNNNr   NNNNr   r   r   Tr   r   Tr   FN)Tr   NNNNNNr|   r   r}   Fr   Tr   N)r?   r   r   Zpaddle.baser   Zpaddle.base.data_feederr   r   Zpaddle.base.frameworkr   Zpaddle.base.layer_helperr   Zpaddle.frameworkr	   r
   __all__r   r1   rQ   rp   r   r   r   r   r   <module>   s   
  $
 D
   _