Delete configuration_intern_vit copy.py
#1
by sy1998 · opened
configuration_intern_vit copy.py +0 -120
configuration_intern_vit copy.py
DELETED
@@ -1,120 +0,0 @@
-# --------------------------------------------------------
-# InternVL
-# Copyright (c) 2024 OpenGVLab
-# Licensed under The MIT License [see LICENSE for details]
-# --------------------------------------------------------
-
-import os
-from typing import Union
-
-from transformers.configuration_utils import PretrainedConfig
-from transformers.utils import logging
-
-logger = logging.get_logger(__name__)
-
-
-class InternVisionConfig(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
-    instantiate a vision encoder according to the specified arguments, defining the model architecture.
-
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-    documentation from [`PretrainedConfig`] for more information.
-
-    Args:
-        num_channels (`int`, *optional*, defaults to 3):
-            Number of color channels in the input images (e.g., 3 for RGB).
-        patch_size (`int`, *optional*, defaults to 14):
-            The size (resolution) of each patch.
-        image_size (`int`, *optional*, defaults to 224):
-            The size (resolution) of each image.
-        qkv_bias (`bool`, *optional*, defaults to `False`):
-            Whether to add a bias to the queries, keys and values in the self-attention layers.
-        hidden_size (`int`, *optional*, defaults to 3200):
-            Dimensionality of the encoder layers and the pooler layer.
-        num_attention_heads (`int`, *optional*, defaults to 25):
-            Number of attention heads for each attention layer in the Transformer encoder.
-        intermediate_size (`int`, *optional*, defaults to 12800):
-            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
-        qk_normalization (`bool`, *optional*, defaults to `True`):
-            Whether to normalize the queries and keys in the self-attention layers.
-        num_hidden_layers (`int`, *optional*, defaults to 48):
-            Number of hidden layers in the Transformer encoder.
-        use_flash_attn (`bool`, *optional*, defaults to `True`):
-            Whether to use the flash attention mechanism.
-        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
-            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
-            `"relu"`, `"selu"` and `"gelu_new"` are supported.
-        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
-            The epsilon used by the layer normalization layers.
-        dropout (`float`, *optional*, defaults to 0.0):
-            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-        drop_path_rate (`float`, *optional*, defaults to 0.0):
-            Dropout rate for stochastic depth.
-        attention_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout ratio for the attention probabilities.
-        initializer_range (`float`, *optional*, defaults to 0.02):
-            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        initializer_factor (`float`, *optional*, defaults to 0.1):
-            A factor for layer scale.
-    """
-
-    model_type = 'intern_vit_6b'
-
-    def __init__(
-            self,
-            num_channels=3,
-            patch_size=14,
-            image_size=224,
-            qkv_bias=False,
-            hidden_size=3200,
-            num_attention_heads=25,
-            intermediate_size=12800,
-            qk_normalization=True,
-            num_hidden_layers=48,
-            use_flash_attn=True,
-            hidden_act='gelu',
-            norm_type='rms_norm',
-            layer_norm_eps=1e-6,
-            dropout=0.0,
-            drop_path_rate=0.0,
-            attention_dropout=0.0,
-            initializer_range=0.02,
-            initializer_factor=0.1,
-            **kwargs,
-    ):
-        super().__init__(**kwargs)
-
-        self.hidden_size = hidden_size
-        self.intermediate_size = intermediate_size
-        self.dropout = dropout
-        self.drop_path_rate = drop_path_rate
-        self.num_hidden_layers = num_hidden_layers
-        self.num_attention_heads = num_attention_heads
-        self.num_channels = num_channels
-        self.patch_size = patch_size
-        self.image_size = image_size
-        self.initializer_range = initializer_range
-        self.initializer_factor = initializer_factor
-        self.attention_dropout = attention_dropout
-        self.layer_norm_eps = layer_norm_eps
-        self.hidden_act = hidden_act
-        self.norm_type = norm_type
-        self.qkv_bias = qkv_bias
-        self.qk_normalization = qk_normalization
-        self.use_flash_attn = use_flash_attn
-
-    @classmethod
-    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
-        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
-
-        if 'vision_config' in config_dict:
-            config_dict = config_dict['vision_config']
-
-        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
-            logger.warning(
-                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
-                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
-            )
-
-        return cls.from_dict(config_dict, **kwargs)
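
For reviewers checking that removing this duplicate is safe: the class above remains available through the canonical configuration_intern_vit.py, and a minimal sketch of how it is typically exercised follows. The import path and the outer 'internvl_chat' model_type are assumptions for illustration, not taken from this PR.

# Minimal sketch, assuming the canonical configuration_intern_vit.py
# (without the " copy" suffix) stays in the repository.
from configuration_intern_vit import InternVisionConfig  # hypothetical import path

# Instantiating with no arguments reproduces the InternViT-6B defaults
# documented in the class docstring above.
config = InternVisionConfig()
assert config.hidden_size == 3200
assert config.num_hidden_layers == 48
assert config.model_type == 'intern_vit_6b'

# from_pretrained() also accepts a full multimodal checkpoint config and
# pulls out the nested `vision_config` dict; the same extraction can be
# exercised offline via from_dict on a hand-built dict.
nested = {
    'model_type': 'internvl_chat',  # hypothetical outer model_type
    'vision_config': {'model_type': 'intern_vit_6b', 'image_size': 448},
}
vision_config = InternVisionConfig.from_dict(nested['vision_config'])
assert vision_config.image_size == 448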