ci : add an option to fail on compile warning (llama/3952)
* feat(ci): add an option to fail on compile warning
* Update CMakeLists.txt
* minor : fix compile warnings
ggml-ci
* ggml : fix unreachable code warnings
ggml-ci
* ci : disable fatal warnings for windows, ios and tvos
* ggml : fix strncpy warning
* ci : disable fatal warnings for MPI build
* ci : add fatal warnings to ggml-ci
ggml-ci
---------
Co-authored-by: Georgi Gerganov <[email protected]>
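The core of the change is a new build option that promotes compiler warnings to errors, so CI jobs fail as soon as a new warning is introduced. The option name and wiring below are illustrative, not copied from the commit's CMakeLists.txt; they sketch the usual pattern for such a switch:

# Minimal sketch of a fail-on-warning build option (option name illustrative).
option(GGML_FATAL_WARNINGS "Fail the build on any compile warning" OFF)

if (GGML_FATAL_WARNINGS)
    if (CMAKE_C_COMPILER_ID MATCHES "GNU|Clang")
        add_compile_options(-Werror)   # GCC/Clang: promote warnings to errors
    elseif (CMAKE_C_COMPILER_ID STREQUAL "MSVC")
        add_compile_options(/WX)       # MSVC equivalent of -Werror
    endif()
endif()

A CI job would then configure with the option enabled (e.g. passing -DGGML_FATAL_WARNINGS=ON, under the naming assumed above). Per the notes in the commit message, fatal warnings stay disabled for the Windows, iOS, tvOS and MPI builds.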
- ggml-backend.c +1 -0
- ggml-metal.m +1 -1
- ggml.c +10 -5
ggml-backend.c CHANGED

@@ -1006,6 +1006,7 @@ static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, gg
         }
     }
     GGML_ASSERT(false && "tensor buffer type not supported by any backend");
+    return -1; // silence warning
 }

 #if 0
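The hunk above is a warning fix, not a behavior change: the function returns int but ends at an assertion, and a compiler that cannot prove the assert never returns will warn that control falls off the end of a non-void function, which a fatal-warnings build turns into an error. A self-contained sketch of the same pattern, using plain assert in place of GGML_ASSERT:

#include <assert.h>

// If the assert compiles away (e.g. under NDEBUG), the function could fall
// off the end without returning a value; the trailing return exists only to
// silence -Wreturn-type and is never reached while the assert is active.
static int backend_from_buffer(const int *supported, int n) {
    for (int i = 0; i < n; ++i) {
        if (supported[i]) {
            return i; // first backend that supports the buffer
        }
    }
    assert(0 && "buffer type not supported by any backend");
    return -1; // silence warning
}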
ggml-metal.m CHANGED

@@ -176,7 +176,7 @@ struct ggml_metal_context {
 // MSL code
 // TODO: move the contents here when ready
 // for now it is easier to work in a separate file
-//static NSString * const msl_library_source = @"see metal.metal";
+// static NSString * const msl_library_source = @"see metal.metal";

 // Here to assist with NSBundle Path Hack
 @interface GGMLMetalClass : NSObject
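The ggml-metal.m change is purely cosmetic: it adds a space after // in a commented-out declaration, bundled under the "minor : fix compile warnings" item. No Objective-C code changes.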
ggml.c CHANGED

@@ -868,7 +868,7 @@ do { \
     const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
                                  _mm256_extractf128_ps(x[0], 1)); \
     const __m128 t1 = _mm_hadd_ps(t0, t0); \
-    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
+    res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
 } while (0)
 // TODO: is this optimal ?

@@ -1149,7 +1149,7 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
         x[i] = _mm_add_ps(x[i], x[offset+i]); \
     } \
     const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
-    res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
+    res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
 }
 // TODO: is this optimal ?

@@ -2086,6 +2086,7 @@ void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
     }
 }
 #else
+    GGML_UNUSED(numa_flag);
     // TODO
 #endif
 }

@@ -3219,7 +3220,7 @@ const char * ggml_get_name(const struct ggml_tensor * tensor) {
 }

 struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
-    strncpy(tensor->name, name, sizeof(tensor->name));
+    strncpy(tensor->name, name, sizeof(tensor->name) - 1);
     tensor->name[sizeof(tensor->name) - 1] = '\0';
     return tensor;
 }

@@ -18575,7 +18576,9 @@ static enum ggml_opt_result linesearch_backtracking(
         (*step) *= width;
     }

-    GGML_UNREACHABLE();
+    GGML_ASSERT(false && "line search failed");
+
+    return GGML_LINESEARCH_FAIL;
 }

 static enum ggml_opt_result ggml_opt_lbfgs(

@@ -18843,7 +18846,9 @@ static enum ggml_opt_result ggml_opt_lbfgs(
         step[0] = 1.0;
     }

-    GGML_UNREACHABLE();
+    GGML_ASSERT(false && "lbfgs failed");
+
+    return GGML_OPT_DID_NOT_CONVERGE;
 }

 struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
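The ggml.c hunks cover four warning classes. The (ggml_float) casts in the two SSE reduce macros make the float-to-double promotion explicit, since _mm_cvtss_f32 returns a float that is stored into the double-width ggml_float. GGML_UNUSED(numa_flag) marks the parameter as deliberately ignored on the non-Linux path; ggml's macro follows the standard cast-to-void idiom, sketched here as a standalone equivalent:

// Standard idiom for an intentionally unused parameter (ggml.h wraps the
// same cast in its GGML_UNUSED macro).
#define UNUSED(x) (void)(x)

static void numa_init_stub(int numa_flag) {
    UNUSED(numa_flag); // NUMA handling not implemented on this platform
}

The strncpy change in ggml_set_name addresses GCC's -Wstringop-truncation: a bound equal to sizeof(dst) lets strncpy fill the whole buffer without a terminator. Copying one byte less and terminating by hand both guarantees a NUL and silences the warning; a minimal sketch, with the 64-byte name field chosen for illustration:

#include <string.h>

struct tensor_like {
    char name[64]; // field size illustrative only
};

static void set_name(struct tensor_like *t, const char *src) {
    strncpy(t->name, src, sizeof(t->name) - 1); // leave room for the NUL
    t->name[sizeof(t->name) - 1] = '\0';        // always terminated
}

Finally, the two optimizer functions end in infinite loops, so any code placed after them is unreachable; replacing the unreachable-code construct with an explicit failure assert plus a return value keeps both the unreachable-code and missing-return diagnostics quiet across compilers.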