ggerganov committed
Commit 97e710a · unverified · Parent: 8a648fc

main : add cli option to disable system prints (#1740)

Files changed (2):
  1. examples/main/main.cpp +24 -15
  2. whisper.cpp +1 -1
examples/main/main.cpp CHANGED
@@ -85,6 +85,7 @@ struct whisper_params {
     bool output_jsn      = false;
     bool output_jsn_full = false;
     bool output_lrc      = false;
+    bool no_prints       = false;
     bool print_special   = false;
     bool print_colors    = false;
     bool print_progress  = false;
@@ -155,6 +156,7 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
         else if (arg == "-oj"   || arg == "--output-json")      { params.output_jsn      = true; }
         else if (arg == "-ojf"  || arg == "--output-json-full") { params.output_jsn_full = params.output_jsn = true; }
         else if (arg == "-of"   || arg == "--output-file")      { params.fname_out.emplace_back(argv[++i]); }
+        else if (arg == "-np"   || arg == "--no-prints")        { params.no_prints       = true; }
         else if (arg == "-ps"   || arg == "--print-special")    { params.print_special   = true; }
         else if (arg == "-pc"   || arg == "--print-colors")     { params.print_colors    = true; }
         else if (arg == "-pp"   || arg == "--print-progress")   { params.print_progress  = true; }
@@ -212,6 +214,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
     fprintf(stderr, "  -oj,       --output-json       [%-7s] output result in a JSON file\n",                 params.output_jsn ? "true" : "false");
     fprintf(stderr, "  -ojf,      --output-json-full  [%-7s] include more information in the JSON file\n",    params.output_jsn_full ? "true" : "false");
     fprintf(stderr, "  -of FNAME, --output-file FNAME [%-7s] output file path (without file extension)\n",    "");
+    fprintf(stderr, "  -np,       --no-prints         [%-7s] do not print anything other than the results\n", params.no_prints ? "true" : "false");
     fprintf(stderr, "  -ps,       --print-special     [%-7s] print special tokens\n",                         params.print_special ? "true" : "false");
     fprintf(stderr, "  -pc,       --print-colors      [%-7s] print colors\n",                                 params.print_colors ? "true" : "false");
     fprintf(stderr, "  -pp,       --print-progress    [%-7s] print progress\n",                               params.print_progress ? "true" : "false");
@@ -852,6 +855,9 @@ bool output_lrc(struct whisper_context * ctx, const char * fname, const whisper_
     return true;
 }
 
+
+void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
+
 int main(int argc, char ** argv) {
     whisper_params params;
 
@@ -878,6 +884,10 @@ int main(int argc, char ** argv) {
         exit(0);
     }
 
+    if (params.no_prints) {
+        whisper_log_set(cb_log_disable, NULL);
+    }
+
     // whisper init
 
     struct whisper_context_params cparams;
@@ -905,26 +915,25 @@ int main(int argc, char ** argv) {
             continue;
         }
 
-        // print system information
-        {
+        if (!whisper_is_multilingual(ctx)) {
+            if (params.language != "en" || params.translate) {
+                params.language = "en";
+                params.translate = false;
+                fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
+            }
+        }
+        if (params.detect_language) {
+            params.language = "auto";
+        }
+
+        if (!params.no_prints) {
+            // print system information
             fprintf(stderr, "\n");
             fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                     params.n_threads*params.n_processors, std::thread::hardware_concurrency(), whisper_print_system_info());
-        }
 
-        // print some info about the processing
-        {
+            // print some info about the processing
             fprintf(stderr, "\n");
-            if (!whisper_is_multilingual(ctx)) {
-                if (params.language != "en" || params.translate) {
-                    params.language = "en";
-                    params.translate = false;
-                    fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
-                }
-            }
-            if (params.detect_language) {
-                params.language = "auto";
-            }
             fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, %d beams + best of %d, lang = %s, task = %s, %stimestamps = %d ...\n",
                     __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
                     params.n_threads, params.n_processors, params.beam_size, params.best_of,
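
Usage note: the silencing pattern that main.cpp now ties to the -np / --no-prints flag is available to any program linking against whisper.cpp, because whisper_log_set() routes all of the library's internal logging through a single callback. The sketch below is a minimal illustration of that pattern and is not part of this commit; it assumes the public API at this version (whisper_log_set, whisper_context_default_params, whisper_init_from_file_with_params), and the model path "ggml-base.en.bin" is only a placeholder.

// Minimal sketch: suppress all whisper.cpp internal prints, mirroring what
// -np / --no-prints does in examples/main/main.cpp.
#include "ggml.h"
#include "whisper.h"

#include <cstdio>

// Same shape as cb_log_disable in main.cpp: a log callback that drops everything.
static void my_log_disable(enum ggml_log_level, const char *, void *) { }

int main() {
    // Install the no-op callback before whisper_init_*() so that the
    // model-loading messages are suppressed as well.
    whisper_log_set(my_log_disable, NULL);

    struct whisper_context_params cparams = whisper_context_default_params();
    struct whisper_context * ctx = whisper_init_from_file_with_params("ggml-base.en.bin", cparams); // placeholder model path
    if (ctx == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // ... run whisper_full() as usual; only the application's own prints appear ...

    whisper_free(ctx);
    return 0;
}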
whisper.cpp CHANGED
@@ -1070,7 +1070,7 @@ static ggml_backend_t whisper_backend_init(const whisper_context_params & params
 #ifdef GGML_USE_METAL
     if (params.use_gpu) {
         WHISPER_LOG_INFO("%s: using Metal backend\n", __func__);
-        ggml_metal_log_set_callback(whisper_log_callback_default, nullptr);
+        ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
         backend_gpu = ggml_backend_metal_init();
         if (!backend_gpu) {
             WHISPER_LOG_ERROR("%s: ggml_backend_metal_init() failed\n", __func__);
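
Design note: before this change, the Metal backend's logger was hard-wired to whisper_log_callback_default, so its messages kept appearing even after an application replaced the logger via whisper_log_set(). Forwarding g_state.log_callback and g_state.log_callback_user_data makes the Metal backend follow whatever callback the application installed, which is what lets -np silence those messages too. As a hedged illustration (not part of this commit), a callback does not have to drop everything; it can filter by level, assuming the GGML_LOG_LEVEL_* values from ggml.h:

// Hypothetical filtering callback: forward only error-level messages and
// discard the rest. With the change above, this also covers the Metal
// backend, because whisper_backend_init() now passes g_state.log_callback
// on to ggml_metal_log_set_callback().
#include "ggml.h"
#include "whisper.h"

#include <cstdio>

static void cb_log_errors_only(enum ggml_log_level level, const char * text, void * /*user_data*/) {
    if (level == GGML_LOG_LEVEL_ERROR) {
        fputs(text, stderr);
    }
}

int main() {
    // Applies to all whisper.cpp logging, including Metal backend initialization.
    whisper_log_set(cb_log_errors_only, NULL);
    return 0;
}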