#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <memory>
#include <iterator>
#include <algorithm>
#include <stdexcept>
#include <boost/program_options.hpp>
// rapp-api headers -- the include paths below are assumed from the header
// names mentioned in the options description (`audio.hpp`,
// `speech_detection_sphinx4.hpp`); adjust them to your installation
#include "audio.hpp"
#include "speech_detection_sphinx4.hpp"

namespace po = boost::program_options;

/// print a vector of T separated by spaces
template <class T>
std::ostream& operator<<(std::ostream& os, const std::vector<T>& v)
{
    std::copy(v.begin(), v.end(), std::ostream_iterator<T>(os, " "));
    return os;
}

/// load a JSGF grammar file into a string
std::string load_jsgf(const std::string filename)
{
    std::ifstream t(filename);
    if (!t.is_open())
        throw std::runtime_error("could not open: " + filename);
    std::string str;
    t.seekg(0, std::ios::end);
    str.reserve(t.tellg());
    t.seekg(0, std::ios::beg);
    str.assign((std::istreambuf_iterator<char>(t)),
               std::istreambuf_iterator<char>());
    return str;
}
int main(int argc, char* argv[])
{
    try {
        // command-line options understood by this example
        po::options_description desc(
            "Allowed options - See headers `audio.hpp` and `speech_detection_sphinx4.hpp` for details");
        desc.add_options()
            ("help", "produce help message")
            ("audio", po::value<std::string>(),
             "(required) the wav/pcm audio input")
            ("audio-source", po::value<std::string>(),
             "(required) the audio source type")
            ("lang", po::value<std::string>(),
             "(required) set language, e.g: `en` or `gr`")
            ("user", po::value<std::string>(),
             "(required) set user, e.g: rapp")
            ("words", po::value<std::vector<std::string>>()->multitoken(),
             "(optional) keyword search, e.g: key book beer")
            ("sentences", po::value<std::vector<std::string>>()->multitoken(),
             "(optional) sentence matching, e.g: find my keys")
            ("jsgf", po::value<std::string>(),
             "(optional) JSGF grammar file");

        po::positional_options_description p;
        p.add("input-file", -1);

        po::variables_map vm;
        po::store(po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);
        po::notify(vm);

        // platform authentication token -- its use was elided in the original listing
        std::string token = "my_token";
        std::string audio_file, audio_source, lang, user, jsgf = "";
        std::vector<std::string> words = {}, sentences = {};
        if (vm.count("help")) {
            std::cout << "Usage: options_description [options]\n";
            std::cout << desc;
            return 0;
        }
        if (vm.count("audio")) {
            std::cout << "audio: " << vm["audio"].as<std::string>() << "\n";
            audio_file = vm["audio"].as<std::string>();
        }
        if (vm.count("audio-source")) {
            std::cout << "audio-source: " << vm["audio-source"].as<std::string>() << "\n";
            audio_source = vm["audio-source"].as<std::string>();
        }
        if (vm.count("lang")) {
            std::cout << "lang: " << vm["lang"].as<std::string>() << "\n";
            lang = vm["lang"].as<std::string>();
        }
        if (vm.count("user")) {
            std::cout << "user: " << vm["user"].as<std::string>() << "\n";
            user = vm["user"].as<std::string>();
        }
        if (vm.count("words")) {
            std::cout << "words: " << vm["words"].as<std::vector<std::string>>() << "\n";
            words = vm["words"].as<std::vector<std::string>>();
        }
        if (vm.count("sentences")) {
            std::cout << "sentences: " << vm["sentences"].as<std::vector<std::string>>() << "\n";
            sentences = vm["sentences"].as<std::vector<std::string>>();
        }
        if (vm.count("jsgf")) {
            std::cout << "JSGF: " << vm["jsgf"].as<std::string>() << "\n";
            jsgf = vm["jsgf"].as<std::string>();
        }
        // all four "(required)" options must be present to proceed
        // (the exact check was elided in the original listing)
        if (!audio_file.empty() && !audio_source.empty() &&
            !lang.empty() && !user.empty())
        {
            std::shared_ptr<rapp::object::audio> audio;
            std::vector<std::string> gram;

            // pick the concrete audio object matching --audio-source
            if (audio_source == "microphone_wav")
                audio = std::make_shared<rapp::object::microphone_wav>(audio_file);
            else if (audio_source == "nao_single_channel_wav")
                audio = std::make_shared<rapp::object::nao_single_channel_wav>(audio_file);
            else if (audio_source == "nao_quad_channel_wav")
                audio = std::make_shared<rapp::object::nao_quad_channel_wav>(audio_file);
            else if (audio_source == "ogg")
                audio = std::make_shared<rapp::object::ogg>(audio_file);
            else
                throw std::runtime_error("unknown audio source");
            // load the optional JSGF grammar into the grammar list
            // (this step was elided in the original listing)
            if (!jsgf.empty())
                gram.push_back(load_jsgf(jsgf));

            // print whatever words the cloud service recognised
            auto callback = [&](std::vector<std::string> words)
            {
                for (const auto & str : words)
                    std::cout << str << " ";
                std::cout << std::endl;
            };

            // build the cloud call; the constructor arguments after `audio` were
            // elided in the original listing, so their order here is assumed --
            // see `speech_detection_sphinx4.hpp` for the exact signature
            auto sphinx4_call = std::make_shared<rapp::cloud::speech_detection_sphinx4>(
                                    audio, lang, user, gram, words, sentences, callback);
            // the original listing stops here; the call would be dispatched through
            // the RAPP service controller's run_job (see below)
        }
        else {
            std::cerr << "missing required arguments -- please see \"--help\"\n";
            return 1;
        }
    }
    catch (std::exception & e) {
        std::cerr << "error: " << e.what() << "\n";
        return 1;
    }
    catch (...) {
        std::cerr << "Exception of unknown type!\n";
        return 1;
    }
    return 0;
}
Main class that controls RAPP services.
std::string load_jsgf(const std::string filename)
    Load a JSGF grammar file into a string.
int main(int argc, char *argv[])
    Parse the command-line options and build the speech_detection_sphinx4 cloud call.
void run_job(const std::shared_ptr< asio_socket > job)
    Run one service job.
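
The listing builds `sphinx4_call` but stops before dispatching it. The entries above document `run_job(const std::shared_ptr< asio_socket > job)` on the main class that controls RAPP services; a minimal sketch of that final step, assuming the class is `rapp::services::service_controller` and is default-constructible (neither detail is confirmed by this page), might look like:

// continuation of main(), after sphinx4_call has been constructed
rapp::services::service_controller ctrl;  // class name and constructor are assumptions
ctrl.run_job(sphinx4_call);               // run one service job (see run_job above)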