cicdatopea committed on
Commit
5c62524
·
verified ·
1 Parent(s): 8db7190

Update config.json

Browse files
Files changed (1) hide show
  1. config.json +1 -62
config.json CHANGED
@@ -56,68 +56,7 @@
56
  "scale_dtype": "torch.float16",
57
  "seqlen": 512,
58
  "sym": true,
59
- "to_quant_block_names": [
60
- [
61
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.0",
62
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.1",
63
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.2",
64
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.3",
65
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.4",
66
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.5",
67
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.6",
68
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.7",
69
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.8",
70
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.9",
71
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.10",
72
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.11",
73
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.12",
74
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.13",
75
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.14",
76
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.15",
77
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.16",
78
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.17",
79
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.18",
80
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.19",
81
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.20",
82
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.21",
83
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.22",
84
- "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.23"
85
- ],
86
- [
87
- "model.layers.0",
88
- "model.layers.1",
89
- "model.layers.2",
90
- "model.layers.3",
91
- "model.layers.4",
92
- "model.layers.5",
93
- "model.layers.6",
94
- "model.layers.7",
95
- "model.layers.8",
96
- "model.layers.9",
97
- "model.layers.10",
98
- "model.layers.11",
99
- "model.layers.12",
100
- "model.layers.13",
101
- "model.layers.14",
102
- "model.layers.15",
103
- "model.layers.16",
104
- "model.layers.17",
105
- "model.layers.18",
106
- "model.layers.19",
107
- "model.layers.20",
108
- "model.layers.21",
109
- "model.layers.22",
110
- "model.layers.23",
111
- "model.layers.24",
112
- "model.layers.25",
113
- "model.layers.26",
114
- "model.layers.27",
115
- "model.layers.28",
116
- "model.layers.29",
117
- "model.layers.30",
118
- "model.layers.31"
119
- ]
120
- ]
121
  },
122
  "resid_pdrop": 0.0,
123
  "rms_norm_eps": 1e-05,
 
56
  "scale_dtype": "torch.float16",
57
  "seqlen": 512,
58
  "sym": true,
59
+ "block_name_to_quantize":"model.vision_embed_tokens.img_processor.vision_model.encoder.layers,model.layers"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
  },
61
  "resid_pdrop": 0.0,
62
  "rms_norm_eps": 1e-05,