
{"id":206,"date":"2017-04-28T07:38:15","date_gmt":"2017-04-28T07:38:15","guid":{"rendered":"http:\/\/appsinmed.com\/program\/?page_id=206"},"modified":"2017-08-16T08:58:26","modified_gmt":"2017-08-16T08:58:26","slug":"interspeech","status":"publish","type":"page","link":"https:\/\/program.appinconf.com\/program\/interspeech\/","title":{"rendered":"Interspeech"},"content":{"rendered":"\n<script type=\"text\/javascript\">\n    var allRooms = JSON.parse('[{\"name\":\"A2\",\"id\":1064},{\"name\":\"Aula Magna\",\"id\":1063},{\"name\":\"B3\",\"id\":1072},{\"name\":\"B4\",\"id\":1065},{\"name\":\"B5\",\"id\":1075},{\"name\":\"C6\",\"id\":1066},{\"name\":\"C307\",\"id\":1076},{\"name\":\"C389\",\"id\":1115},{\"name\":\"C397\",\"id\":1077},{\"name\":\"D8\",\"id\":1062},{\"name\":\"E10\",\"id\":1060},{\"name\":\"E306\",\"id\":1070},{\"name\":\"E397\",\"id\":1071},{\"name\":\"F0 (KTH)\",\"id\":1074},{\"name\":\"F11\",\"id\":1059},{\"name\":\"Fantum (KTH)\",\"id\":1073},{\"name\":\"Poster 1\",\"id\":1061},{\"name\":\"Poster 2\",\"id\":1067},{\"name\":\"Poster 3\",\"id\":1069},{\"name\":\"Poster 4\",\"id\":1068},{\"name\":\"K\\u00e4gelbanan, S\\u00f6dra teatern\",\"id\":1078},{\"name\":\"Tekniska Museet and Etnografiska museet\",\"id\":1081},{\"name\":\"Various locations\",\"id\":1079},{\"name\":\"S\\u00f6dra Huset, House A\",\"id\":1112},{\"name\":\"Stockholm City Hall and Teaterbaren\",\"id\":1116}]');\n    var allCategories = JSON.parse('[{\"name\":\"Analysis of Paralinguistics in Speech and Language\",\"id\":1052},{\"name\":\"Analysis of Speech and Audio Signals\",\"id\":1062},{\"color\":\"#72D9EE\",\"name\":\"Keynote\",\"id\":1057},{\"name\":\"Misc\",\"id\":1068},{\"name\":\"Phonetics, Phonology, and Prosody\",\"id\":1056},{\"color\":\"#EEA2A2\",\"name\":\"Show & Tell\",\"id\":1063},{\"color\":\"#C9EE91\",\"name\":\"Social event\",\"id\":1067},{\"name\":\"Speaker and Language Identification\",\"id\":1054},{\"name\":\"Special event\",\"id\":1064},{\"name\":\"Speech Coding 
and Enhancement\",\"id\":1060},{\"name\":\"Speech Perception, Production and Acquisition\",\"id\":1055},{\"name\":\"Speech Recognition: Architecture, Search, and Linguistic Components\",\"id\":1061},{\"name\":\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\",\"id\":1058},{\"name\":\"Speech Recognition: Technologies and Systems for New Applications\",\"id\":1066},{\"name\":\"Speech Synthesis and Spoken Language Generation\",\"id\":1059},{\"name\":\"Spoken Dialog Systems and Analysis of Conversation\",\"id\":1051},{\"name\":\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\",\"id\":1053},{\"color\":\"#9264EE\",\"name\":\"Tutorial\",\"id\":1065}]');\n  <\/script>\n\n  <script type=\"text\/javascript\">\n    var templateUrl = 'https:\/\/program.appinconf.com\/program\/wp-content\/plugins\/wordpress_plugin_schedule\/img\/';\n  <\/script>\n\n  <input type=\"hidden\" id=\"new_cms\" value=\"0\" \/>\n  <input type=\"hidden\" id=\"spanning_color\" value=\"#A2BEDC\" \/>\n  <input type=\"hidden\" id=\"spanning_text_color\" value=\"#000000\" \/>\n  <input type=\"hidden\" id=\"list_as_default\" value=\"0\" \/>\n  <input type=\"hidden\" id=\"fetch_abstract\" value=\"\" \/>\n  <input type=\"hidden\" id=\"show_abs_nbr\" value=\"\" \/>\n  <input type=\"hidden\" id=\"use-auto-heights\" value=\"false\" \/>\n  <input id='base_path' type='hidden' value='https:\/\/program.appinconf.com\/program\/wp-content\/plugins\/wordpress_plugin_schedule' \/>\n  <input id='project' type='hidden' value='project_242_2017_01_12' \/>\n  <input id='lang' type='hidden' value='en' \/>\n  <input id='show_link_to_abstract' type='hidden' value='0' \/>\n\n  <div id=\"plugin_main_container\" style='height: 1170px;'>\n\n    \n    <div id=\"type_filter_div\" style='display: none;'>\n      <div class=\"btn-group\" role=\"group\" aria-label=\"...\">\n        <button id=\"show_graph_schedule_button\" type=\"button\" class=\"btn 
btn-default selected_list_type\">\n          <i class=\"fa fa-calendar\" aria-hidden=\"true\"><\/i>\n        <\/button>\n        <button id=\"show_list_schedule_button\" type=\"button\" class=\"btn btn-default \">\n          <i class=\"fa fa-list\" aria-hidden=\"true\"><\/i>\n        <\/button>\n        <button id=\"list_program_button_print\" type=\"button\" class=\"btn btn-default\" style=\"display: none;\">\n          <i class=\"fa fa-print\" aria-hidden=\"true\"><\/i>\n        <\/button>\n      <\/div>\n      \n    <\/div>\n\n    <div id=\"personal_filter_div\" style='display: none;'>\n        <div id=\"email-input-group\" class=\"email-input-group input-group\">\n          <span class=\"input-group-addon\" id=\"sizing-addon2\">@<\/span>\n          <input id=\"list_filter_input\" type=\"email\" class=\"form-control\" placeholder=\"Enter your email or booking reference\" aria-describedby=\"sizing-addon2\">\n        <\/div>\n        <button id=\"list_filter_button\" class=\"btn btn-primary\">Show  my program<\/button>\n\n    <\/div>\n\n    <div id=\"personal_filter_user_information\" style=\"display: none;\">\n      <i class=\"fa fa-user fa-2x\" aria-hidden=\"true\"><\/i><span id=\"personal_information\"><\/span>\n      <button id=\"list_filter_button_print\" class=\"btn btn-success hide\"><i class=\"fa fa-print\" aria-hidden=\"true\"><\/i><\/button>\n      <button id=\"list_filter_button_reset\" class=\"btn btn-danger hide\">\u00c5terst\u00e4ll<\/button>\n    <\/div>\n\n    <div id=\"list_calendar_container\" style='display: none;'>\n\n      <div id=\"list_filter_alert_div_not_found\" class=\"alert alert-info hide\" role=\"alert\">\n          <span>\n            User was not found. Please try again.          <\/span>\n      <\/div>\n\n      <div id=\"list_filter_alert_div_error\" class=\"alert alert-info hide\" role=\"alert\">\n          <span>\n            There was an error fetching your program. Please try again.          
<\/span>\n      <\/div>\n\n      <div id=\"list_calendar_container_inner\" style=\"height: 1000px;\">\n\n          <div class='list_header' data-day='0' style='background-color: rgba(32, 132, 196, 0.83); color: rgba(255, 255, 255, 1);'>Sunday 20 August<\/div>\n          <div class=\"box-wrapper\"\n              data-id=\"5243\">\n\n              <div class='time-header'>08:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5243\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Registration\"\n                data-time=\"08:00-17:00\"\n                data-room=\"S\u00f6dra Huset, House A\"\n                data-room-id=\"1112\"\n                data-room-name=\"S\u00f6dra Huset, House A\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"\"\n                data-category-ids=\"\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Registration<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">S\u00f6dra Huset, House A<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"08:00\">08:00-17:00 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4262\">\n\n              <div class='time-header'>09:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4262\"\n                data-project=\"project_242_2017_01_12\"\n   
             data-title=\"TUTORIAL: Real-world Ambulatory Monitoring of Vocal Behavior\"\n                data-time=\"09:00-12:30\"\n                data-room=\"B5\"\n                data-room-id=\"1075\"\n                data-room-name=\"B5\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4262.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;strong&gt;Organizer:&lt;\/strong&gt;&lt;br&gt;Daryush D. Mehta, Center for Laryngeal Surgery and Voice Rehabilitation, Massachusetts General Hospital&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Tutorial\"\n                data-category-ids=\"1065\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #9264EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>TUTORIAL: Real-world Ambulatory Monitoring of Vocal Behavior<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B5<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:00\">09:00-12:30 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4261\">\n\n              <div class='time-header' style='visibility: hidden;'>09:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4261\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"TUTORIAL: Deep Learning for Dialogue Systems\"\n                data-time=\"09:00-12:30\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"0\"\n               
 data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4261.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Yun-Nung Chen, National Taiwan University, Taipei, Taiwan&lt;br&gt;Asli Celikyilmazy, Microsoft Research, Redmond, WA&lt;br&gt;Dilek Hakkani-Tur, Google Research, Mountain View, CA&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Tutorial\"\n                data-category-ids=\"1065\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #9264EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>TUTORIAL: Deep Learning for Dialogue Systems<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:00\">09:00-12:30 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4260\">\n\n              <div class='time-header' style='visibility: hidden;'>09:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4260\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"TUTORIAL: Statistical Parametric Speech Processing: Solving Problems with the Model-based Approach\"\n                data-time=\"09:00-12:30\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4260.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                
data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Mads Gr\u00e6sb\u00f8ll Christensen, Aalborg University&lt;br&gt;Assistant Prof. Jesper Rindom Jensen, Aalborg University&lt;br&gt;Assistant Prof. Jesper Kj\u00e6r Nielsen, Aalborg University.&lt;br&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Tutorial\"\n                data-category-ids=\"1065\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #9264EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>TUTORIAL: Statistical Parametric Speech Processing: Solving Problems with the Model-based Approach<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:00\">09:00-12:30 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4264\">\n\n              <div class='time-header' style='visibility: hidden;'>09:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4264\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"TUTORIAL: Creating Speech Databases of Less-Resourced Languages: A CLARIN Hands-On Tutorial\"\n                data-time=\"09:00-12:30\"\n                data-room=\"C307\"\n                data-room-id=\"1076\"\n                data-room-name=\"C307\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4264.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Christoph Draxler, Institute of 
Phonetics and Speech Communication, Ludwig Maximilian University Munich, Germany&lt;br&gt;Florian Schiel, Institute of Phonetics and Speech Communication, Ludwig Maximilian University Munich, Germany&lt;br&gt;Thomas Kisler, Institute of Phonetics and Speech Communication, Ludwig Maximilian University Munich, Germany&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Tutorial\"\n                data-category-ids=\"1065\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #9264EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>TUTORIAL: Creating Speech Databases of Less-Resourced Languages: A CLARIN Hands-On Tutorial<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C307<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:00\">09:00-12:30 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4263\">\n\n              <div class='time-header' style='visibility: hidden;'>09:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4263\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"TUTORIAL: Insights from Qualitative Research: An Introduction to the Phonetics of Talk-In-Interaction\"\n                data-time=\"09:00-12:30\"\n                data-room=\"C397\"\n                data-room-id=\"1077\"\n                data-room-name=\"C397\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4263.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                
data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Richard Ogden Department of Language &amp;amp; Linguistic Science, Centre for Advanced Studies in Language &amp;amp; Communication, University of York, UK.&lt;br&gt;Jan Gorisch Department of Pragmatics, Institute for the German Language (IDS), Mannheim, Germany.&lt;br&gt;Gareth Walker School of English, University of Sheffield, UK.&lt;br&gt;Meg Zellers Department of Linguistics: English, University of Stuttgart, Germany&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Tutorial\"\n                data-category-ids=\"1065\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #9264EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>TUTORIAL: Insights from Qualitative Research: An Introduction to the Phonetics of Talk-In-Interaction<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C397<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:00\">09:00-12:30 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4258\">\n\n              <div class='time-header'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4258\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"The Second Workshop for Young Female Researchers in Speech Science & Technology (YFRSW)\"\n                data-time=\"10:00-17:00\"\n                data-room=\"Fantum (KTH)\"\n                data-room-id=\"1073\"\n                data-room-name=\"Fantum (KTH)\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                
data-abs-path=\"\/abs\/4258.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Special event\"\n                data-category-ids=\"1064\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>The Second Workshop for Young Female Researchers in Speech Science & Technology (YFRSW)<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Fantum (KTH)<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-17:00 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4259\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4259\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"3rd Doctoral Consortium\"\n                data-time=\"10:00-17:00\"\n                data-room=\"F0 (KTH)\"\n                data-room-id=\"1074\"\n                data-room-name=\"F0 (KTH)\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4259.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Special event\"\n                data-category-ids=\"1064\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n      
        <div class=\"box_inner\">\n                                <span>3rd Doctoral Consortium<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F0 (KTH)<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-17:00 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4267\">\n\n              <div class='time-header'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4267\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"TUTORIAL: Latest Advances in Computational Speech and Audio Analysis: Big Data, Deep Learning, and Whatnots\"\n                data-time=\"13:30-17:00\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4267.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\n                 &lt;strong&gt;Organizers:&lt;\/strong&gt;\n                 &lt;br&gt;Bj\u00f6rn W. Schuller, Imperial College London, U.K. 
&amp;amp; Univeristy of Passau, Germany &amp;amp; audEERING Gmbh, Germany\n                 &lt;br&gt;Nicholas Cummins, Univeristy of Passau, Germany\n                &lt;br&gt;&lt;br&gt;\"\n                data-category=\"Tutorial\"\n                data-category-ids=\"1065\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #9264EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>TUTORIAL: Latest Advances in Computational Speech and Audio Analysis: Big Data, Deep Learning, and Whatnots<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-17:00 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4268\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4268\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"TUTORIAL: Modeling Situated Multi-modal Interaction with the Furhat Robot Head\"\n                data-time=\"13:30-17:00\"\n                data-room=\"C389\"\n                data-room-id=\"1115\"\n                data-room-name=\"C389\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4268.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Gabriel Skantze, KTH Speech Music and Hearing &amp;amp; Furhat Robotics, Sweden&lt;br&gt;Andr\u00e9 Pereira, Furhat Robotics, 
Sweden&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Tutorial\"\n                data-category-ids=\"1065\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #9264EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>TUTORIAL: Modeling Situated Multi-modal Interaction with the Furhat Robot Head<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C389<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-17:00 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4265\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4265\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"TUTORIAL: Computational Modeling of Language Acquisition\"\n                data-time=\"13:30-17:00\"\n                data-room=\"B5\"\n                data-room-id=\"1075\"\n                data-room-name=\"B5\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4265.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Naomi Feldman, University of Maryland, MD&lt;br&gt;Emmanuel Dupoux, Ecole des Hautes Etudes en Sciences Sociales, France&lt;br&gt;Okko R\u00e4s\u00e4nen, Aalto University, Finland&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Tutorial\"\n                data-category-ids=\"1065\"\n                data-span-all=\"\"\n              >\n\n          
    \n\n              <div class=\"vertical_bar\" style=\"background-color: #9264EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>TUTORIAL: Computational Modeling of Language Acquisition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B5<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-17:00 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4266\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4266\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"TUTORIAL: Deep Learning for Text-to-Speech Synthesis, using the Merlin toolkit\"\n                data-time=\"13:30-17:00\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"0\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4266.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;p&gt;\n                 &lt;\/p&gt;&lt;p&gt;&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;\/p&gt;&lt;p&gt;&lt;\/p&gt;&lt;ul&gt;&lt;li&gt;Simon King, Centre for Speech Technology Research, University of Edinburgh, UK&lt;\/li&gt;&lt;li&gt;Oliver Watts, Centre for Speech Technology Research, University of Edinburgh, UK&lt;\/li&gt;&lt;li&gt;Srikanth Ronanki, Centre for Speech Technology Research, University of Edinburgh, UK&lt;\/li&gt;&lt;li&gt;Zhizheng Wu, Apple Inc, USA&lt;\/li&gt;&lt;li&gt;Felipe Espic, Centre for Speech Technology Research, University of Edinburgh, 
UK&lt;\/li&gt;&lt;\/ul&gt;&lt;p&gt;\n                &lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Tutorial\"\n                data-category-ids=\"1065\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #9264EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>TUTORIAL: Deep Learning for Text-to-Speech Synthesis, using the Merlin toolkit<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-17:00 - Sunday 20 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          <div class='list_header' data-day='1' style='background-color: rgba(32, 132, 196, 0.83); color: rgba(255, 255, 255, 1);'>Monday 21 August<\/div>\n          <div class=\"box-wrapper\"\n              data-id=\"5245\">\n\n              <div class='time-header'>08:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5245\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Registration\"\n                data-time=\"08:00-17:00\"\n                data-room=\"S\u00f6dra Huset, House A\"\n                data-room-id=\"1112\"\n                data-room-name=\"S\u00f6dra Huset, House A\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"\"\n                data-category-ids=\"\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 
0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Registration<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">S\u00f6dra Huset, House A<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"08:00\">08:00-17:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5264\">\n\n              <div class='time-header' style='visibility: hidden;'>08:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5264\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Registration\"\n                data-time=\"08:00-09:00\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"\"\n                data-category-ids=\"\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Registration<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"08:00\">08:00-09:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n        
  \n          <div class=\"box-wrapper\"\n              data-id=\"5240\">\n\n              <div class='time-header'>09:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5240\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Opening session\"\n                data-time=\"09:00-09:45\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;p&gt;The session will also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Opening session<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:00\">09:00-09:45 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4232\">\n\n              <div class='time-header'>09:45<\/div>\n\n              <div class=\"box\"\n                data-id=\"4232\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"ISCA Medal 2017 Ceremony\"\n                data-time=\"09:45-10:15\"\n                data-room=\"Aula Magna\"\n                
data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4232.html\"\n                data-speaker=\"Fumitada Itakura\"\n                data-speakercell=\"Fumitada Itakura\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Haizhou Li&lt;br&gt;&lt;br&gt;&lt;p&gt;The session will also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;br&gt;&lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Keynote\"\n                data-category-ids=\"1057\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #72D9EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>ISCA Medal 2017 Ceremony<\/span>\n               <br>\n               <div class=\"lecturer\"><span>Fumitada Itakura<\/span><\/div>\n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:45\">09:45-10:15 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5256\">\n\n              <div class='time-header'>10:15<\/div>\n\n              <div class=\"box\"\n                data-id=\"5256\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Refreshments\"\n                data-time=\"10:15-11:00\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                
data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"\"\n                data-category-ids=\"\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Refreshments<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:15\">10:15-11:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4237\">\n\n              <div class='time-header'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4237\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Acoustic and Articulatory Phonetics\"\n                data-time=\"11:00-13:00\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Marzena Zygis; \u0160tefan Be\u0148u\u0161&lt;br&gt;&lt;br&gt;11.00-11.20 - Phonetic Correlates of Pharyngeal and Pharyngealized Consonants in Saudi, Lebanese, and Jordanian Arabic: an rt-MRI Study&lt;br&gt;&lt;small&gt;Zainab Hermes; Marissa Barlaz; Ryan Shosted; Zhi-Pei Liang; Brad Sutton&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Glottal opening and strategies of production of fricatives&lt;br&gt;&lt;small&gt;Benjamin Elie; Yves 
Laprie&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Acoustics and articulation of medial versus final coronal stop gemination contrasts in Moroccan Arabic&lt;br&gt;&lt;small&gt;Mohamed Yassine Frej; Christopher Carignan; Catherine T. Best&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - How are four-level length distinctions produced? Evidence from Moroccan Arabic&lt;br&gt;&lt;small&gt;Giuseppina Turco; Karim Shoul; Rachid Ridouane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - Nature of contrast and coarticulation: Evidence from Mizo tones and Assamese vowel harmony&lt;br&gt;&lt;small&gt;Indranil Dutta; Irfan S.; Pamir Gogoi; Priyankoo Sarmah&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - CANCELLED: Vowels in the Barunga variety of North Australian Kriol&lt;br&gt;&lt;small&gt;Caroline Jones; Katherine Demuth; Weicong Li; Andre Almeida&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Acoustic and Articulatory Phonetics<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4256\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4256\"\n                data-project=\"project_242_2017_01_12\"\n                
data-title=\"Special Session: Interspeech 2017 Automatic Speaker Verification Spoofing and Countermeasures Challenge 1\"\n                data-time=\"11:00-13:00\"\n                data-room=\"D8\"\n                data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tomi Kinnunen; Junichi Yamagishi&lt;br&gt;&lt;br&gt;11.00-11.30 - The ASVspoof 2017 Challenge: Assessing the Limits of Replay Spoofing Attack Detection&lt;br&gt;&lt;small&gt;Tomi Kinnunen; Md Sahidullah; H\u00e9ctor Delgado; Massimiliano Todisco; Nicholas Evans; Junichi Yamagishi; Kong Aik Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.30-11.45 - Experimental analysis of features for replay attack detection-Results on the ASVspoof 2017 Challenge&lt;br&gt;&lt;small&gt;Roberto Javier Font Ruiz; Mar\u00eda Jos\u00e9 Cano Vicente; Juan Manuel Esp\u00edn L\u00f3pez&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.45-12.00 - Novel Variable Length Teager Energy Separation Based Instantaneous Frequency Features for Replay Detection&lt;br&gt;&lt;small&gt;Hemant Patil; Madhu Kamble; Tanvina Patel; Meet Soni&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.15 - Countermeasures for Automatic Speaker Verification Replay Spoofing Attack : On Data Augmentation, Feature Representation, Classification and Fusion&lt;br&gt;&lt;small&gt;Weicheng Cai; Danwei Cai; Wenbo Liu; Ming Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.15-12.30 - Spoof Detection Using Source, Instantaneous Frequency and Cepstral Features&lt;br&gt;&lt;small&gt;Sarfaraz Jelil; Rohan Kumar Das; S R Mahadeva Prasanna; Rohit Sinha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.30-12.45 - Audio Replay Attack Detection with High-Frequency Features&lt;br&gt;&lt;small&gt;Marcin Witkowski; Stanis\u0142aw Kacprzak; Piotr \u017belasko; Konrad Kowalczyk; Jakub 
Ga\u0142ka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.45-13.00 - Feature selection based on CQCCs for Automatic Speaker Verification spoofing&lt;br&gt;&lt;small&gt;Wang Xianliang; Xiao Yanhong; Zhu Xuan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Interspeech 2017 Automatic Speaker Verification Spoofing and Countermeasures Challenge 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4236\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4236\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Dereverberation, Echo Cancellation and Speech \"\n                data-time=\"11:00-13:00\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Stephen Zahorian ; Bernd T. 
Meyer&lt;br&gt;&lt;br&gt;11.00-11.20 - Improving Speaker Verification for Reverberant Conditions with Deep Neural Network Dereverberation Processing&lt;br&gt;&lt;small&gt;Peter Guzewich; Stephen Zahorian&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Stepsize Control for Acoustic Feedback Cancellation Based on the Detection of Reverberant Signal Periods and the Estimated System Distance&lt;br&gt;&lt;small&gt;Philipp Bulling; Klaus Linhard; Arthur Wolf; Gerhard Schmidt&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - A Delay-Flexible Stereo Acoustic Echo Cancellation for DFT-Based In-Car Communication (ICC) Systems&lt;br&gt;&lt;small&gt;Jan Franzen; Tim Fingscheidt&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - Speech Enhancement Based on Harmonic Estimation combined with MMSE to Improve Speech Intelligibility for Cochlear Implant Recipients&lt;br&gt;&lt;small&gt;Dongmei Wang; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - Improving speech intelligibility in binaural hearing aids by estimating a time-frequency mask with a weighted least squares classi\ufb01er&lt;br&gt;&lt;small&gt;David Ayllon; Roberto Gil-Pita; Manuel Rosa-Zurera&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Simulations of high-frequency vocoder on Mandarin speech recognition for acoustic hearing preserved cochlear implant&lt;br&gt;&lt;small&gt;Tsung-Chen Wu; Tai-Shih Chi; Chia-Fone Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Coding and Enhancement\"\n                data-category-ids=\"1060\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Dereverberation, Echo Cancellation and Speech <\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               
<\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4234\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4234\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Multimodal and Articulatory Synthesis\"\n                data-time=\"11:00-13:00\"\n                data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Ingmar Steiner; Korin Richmond&lt;br&gt;&lt;br&gt;11.00-11.20 - The Influence of Synthetic Voice on the Evaluation of a Virtual Character&lt;br&gt;&lt;small&gt;Joao Cabral; Benjamin Cowan; Katja Zibrek; Rachel McDonnell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Articulatory Text-to-Speech Synthesis using the Digital Waveguide Mesh driven by a Deep Neural Network&lt;br&gt;&lt;small&gt;Amelia Gully; Takenori Yoshimura; Damian Murphy; Kei Hashimoto; Yoshihiko Nankaku; Keiichi Tokuda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - An HMM\/DNN comparison for synchronized text-to-speech and tongue motion synthesis&lt;br&gt;&lt;small&gt;S\u00e9bastien Le Maguer; Ingmar Steiner; Alexander Hewer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - VCV Synthesis using Task Dynamics to Animate a Factor-based Articulatory Model&lt;br&gt;&lt;small&gt;Rachel Alexander; Tanner Sorensen; Asterios Toutios; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - Beyond the Listening Test: An 
interactive approach to TTS Evaluation&lt;br&gt;&lt;small&gt;Joseph Mendelson; Matthew Aylett&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Integrating Articulatory Information into Deep Learning-Based Text-to-Speech Synthesis&lt;br&gt;&lt;small&gt;Beiming Cao; Myungjong Kim; Jan van Santen; Ted Mau; Jun Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Synthesis and Spoken Language Generation\"\n                data-category-ids=\"1059\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Multimodal and Articulatory Synthesis<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4250\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4250\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 1\"\n                data-time=\"11:00-13:00\"\n                data-room=\"E306\"\n                data-room-id=\"1070\"\n                data-room-name=\"E306\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"11.00-13.00 - A system for real-time collaborative transcription correction&lt;br&gt;&lt;small&gt;Peter Bell; Joachim Fainberg; 
Catherine Lai; Mark Sinclair&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - ChunkitApp: Investigating the relevant units of online speech processing&lt;br&gt;&lt;small&gt;Svetlana Vetchinnikova; Anna Mauranen; Nina Mikusova&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Extending the EMU Speech Database Management System: Cloud Hosting, Team Collaboration, Automatic Revision Control&lt;br&gt;&lt;small&gt;Markus Jochim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - HomeBank: A repository for long-form real-world audio recordings of children&lt;br&gt;&lt;small&gt;Anne Warlaumont; Mark vanDam; Elika Bergelson; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - MoPAReST - Mobile Phone Assisted Remote Speech Therapy Platform&lt;br&gt;&lt;small&gt;Chitralekha Bhat; Anjali Kant; Bhavik Vachhani; Sarita Rautara; Ashok Kumar Sinha; Sunil Kumar Kopparapu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Prosograph: A Tool for Prosody Visualisation of Large Speech Corpora&lt;br&gt;&lt;small&gt;Alp Oktem; Mireia Farr\u00fas; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E306<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4235\">\n\n              <div class='time-header' style='visibility: 
hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4235\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Multimodal Paralinguistics\"\n                data-time=\"11:00-13:00\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Paula Lopez-Otero; Elizabeth Shriberg&lt;br&gt;&lt;br&gt;11.00-11.20 - Multimodal markers of persuasive speech : designing a Virtual Debate Coach&lt;br&gt;&lt;small&gt;Volha Petukhova; Manoj Raju; Harry Bunt&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Acoustic-Prosodic and Physiological Response to Stressful Interactions in Children with Autism Spectrum Disorder&lt;br&gt;&lt;small&gt;Daniel Bone; Julia Mertens; Emily Zane; Sungbok Lee; Shrikanth Narayanan; Ruth Grossman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - A Stepwise Analysis of Aggregated Crowdsourced Labels Describing Multimodal Emotional Behaviors&lt;br&gt;&lt;small&gt;Alec Burmania; Carlos Busso&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - An information theoretic analysis of the temporal synchrony between head gestures and prosodic patterns in spontaneous speech&lt;br&gt;&lt;small&gt;Gaurav Fotedar; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - Multimodal Prediction of Affect Dimensions Fusing Multiple Regression Techniques&lt;br&gt;&lt;small&gt;Dongyan Huang; Wan Ding; Mingyu Xu; Huaiping Ming; Xinguo Yu; Minghui Dong; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Co-production of speech and pointing gestures in clear and perturbed interactive tasks: multimodal designation strategies&lt;br&gt;&lt;small&gt;Marion Dohen; Benjamin Roustan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n    
            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Multimodal Paralinguistics<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4251\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4251\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 2\"\n                data-time=\"11:00-13:00\"\n                data-room=\"E397\"\n                data-room-id=\"1071\"\n                data-room-name=\"E397\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"11.00-13.00 - An apparatus to investigate western opera singing skill learning using performance and result biofeedback, and measuring its neural correlates&lt;br&gt;&lt;small&gt;Aurore Jaumard-Hakoun; Samy Chikhi; Takfarinas Medani; Angelika Nair; G\u00e9rard Dreyfus; Fran\u00e7ois-Beno\u00eet Vialatte&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Emojive! 
Collecting Emotion Data from Speech and Facial Expression using Mobile Game App&lt;br&gt;&lt;small&gt;Ji Ho Park; Nayeon Lee; Dario Bertero; Anik Dey; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Mylly - The Mill: A new platform for processing speech and text corpora easily and efficiently&lt;br&gt;&lt;small&gt;Mietta Lennes; Jussi Piitulainen; Martin Matthiesen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - PercyConfigurator -- Perception Experiments as a Service&lt;br&gt;&lt;small&gt;Christoph Draxler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - System for speech transcription and post-editing in Microsoft Word&lt;br&gt;&lt;small&gt;Askars Salimbajevs; Indra Ikauniece&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Visual Learning 2: Pronunciation app using ultrasound, video, and MRI&lt;br&gt;&lt;small&gt;Kyori Suzuki; Ian Wilson; Hayato Watanabe&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E397<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4244\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4244\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech and 
Audio Segmentation and Classification 2\"\n                data-time=\"11:00-13:00\"\n                data-room=\"Poster 2\"\n                data-room-id=\"1067\"\n                data-room-name=\"Poster 2\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hugo van Hamme&lt;br&gt;&lt;br&gt;11.00-13.00 - A robust Voiced\/Unvoiced phoneme classification from whispered speech using the \u201ccolor\u201d of whispered phonemes and Deep Neural Network&lt;br&gt;&lt;small&gt;Nisha Meenakshi; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Attention based CLDNNs for short-duration acoustic scene classification&lt;br&gt;&lt;small&gt;Jinxi Guo; Ning Xu; Li-Jia Li; Abeer Alwan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - AUDIO CLASSIFICATION USING CLASS-SPECIFIC LEARNED DESCRIPTORS&lt;br&gt;&lt;small&gt;Sukanya Sonowal; Tushar Sandhan; Inkyu Choi; Nam Soo Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Enhanced Feature Extraction for Speech Detection in Media Audio&lt;br&gt;&lt;small&gt;Inseon Jang; ChungHyun Ahn; Jeongil Seo; Younseon Jang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Frame-wise dynamic threshold based polyphonic acoustic event detection&lt;br&gt;&lt;small&gt;Xianjun Xia; Roberto Togneri; Ferdous Sohel; David Huang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Hidden Markov Model Variational Autoencoder for Acoustic Unit Discovery&lt;br&gt;&lt;small&gt;Janek Ebbers; Jahn Heymann; Lukas Drude; Thomas Glarner; Reinhold Haeb-Umbach; Bhiksha Raj&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Indoor\/Outdoor Audio Classification using Foreground Speech Segmentation&lt;br&gt;&lt;small&gt;Banriskhem K. 
Khonglah; Deepak K T; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Montreal Forced Aligner: trainable text-speech alignment using Kaldi&lt;br&gt;&lt;small&gt;Michael McAuliffe; Michaela Socolof; Sarah Mihuc; Michael Wagner; Morgan Sonderegger&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Multilingual I-Vector based Statistical Modeling for Music Genre Classification&lt;br&gt;&lt;small&gt;Jia Dai; Wei Xue; Wenju Liu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Virtual Adversarial Training and Data Augmentation for Acoustic Event Detection with Gated Recurrent Neural Networks&lt;br&gt;&lt;small&gt;Matthias Z\u00f6hrer; Franz Pernkopf&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Speech and Audio Signals\"\n                data-category-ids=\"1062\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech and Audio Segmentation and Classification 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4255\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4255\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Speech Technology for Code-Switching in Multilingual Communities\"\n                data-time=\"11:00-13:00\"\n                
data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kalika Bali; Alan W Black&lt;br&gt;&lt;br&gt; \n                 &lt;p&gt;&lt;\/p&gt; \n                 &lt;p&gt;See full description at: https:\/\/www.microsoft.com\/en-us\/research\/event\/interspeech-2017-special-session-speech-technologies-for-code-switching-in-multilingual-communities\/#&lt;\/p&gt;\n                 &lt;p&gt;Topics of interest for this special session will include but are not limited to:&amp;nbsp;&lt;br&gt;&lt;\/p&gt; \n                 &lt;li&gt;Speech Recognition of code-switched speech&lt;\/li&gt; \n                 &lt;li&gt;Language Modeling for code-switched speech&lt;\/li&gt; \n                 &lt;li&gt;Speech Synthesis of code-switched text&lt;\/li&gt; \n                 &lt;li&gt;Speech Translation of code-switched languages&lt;\/li&gt; \n                 &lt;li&gt;Spoken Dialogue Systems that can handle code-switching&lt;\/li&gt; \n                 &lt;li&gt;Speech data and resources for code-switching&lt;\/li&gt; \n                 &lt;li&gt;Language Identification from speech&lt;\/li&gt; \n                 &lt;p&gt;&lt;\/p&gt; \n                 &lt;p&gt;&lt;br&gt;&lt;\/p&gt;\n                 &lt;p&gt;&lt;span style=&quot;font-weight: bold;&quot;&gt;Organizing Committee:&lt;\/span&gt;&lt;br&gt;&lt;\/p&gt;\n                 &lt;p&gt;Kalika Bali, Microsoft Research India&lt;br&gt;Alan W Black, Carnegie Mellon University&lt;br&gt;Mona Diab, George Washington University&lt;br&gt;Julia Hirschberg, Columbia University&lt;br&gt;Sunayana Sitaram, Microsoft Research India&lt;br&gt;Thamar Solorio, University of Houston&lt;\/p&gt; \n                &lt;br&gt;&lt;br&gt;11.00-11.20 - 
Introduction&lt;br&gt;&lt;small&gt;Kalika Bali; Alan W Black&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Longitudinal Speaker Clustering and Verification Corpus with Code-Switching Frisian-Dutch Speech&lt;br&gt;&lt;small&gt;Emre Yilmaz; Jelske Dijkstra; Hans Van de Velde; Frederik Kampstra; Jouke Algra; Henk Van den Heuvel; David Van Leeuwen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Exploiting Untranscribed Broadcast Data for Improved Code-Switching Detection&lt;br&gt;&lt;small&gt;Emre Yilmaz; Henk van den Heuvel; David van Leeuwen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - Jee haan, I'd like both, por favor: Elicitation of a Code-Switched Corpus of Hindi-English and Spanish-English Human-Machine Dialog&lt;br&gt;&lt;small&gt;Vikram Ramanarayanan; David Suendermann-Oeft&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - On building mixed lingual speech synthesis systems&lt;br&gt;&lt;small&gt;SaiKrishna Rallabandi; Alan W Black&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Speech Synthesis for Mixed-Language Navigation Instructions&lt;br&gt;&lt;small&gt;Khyathi Chandu; Sai Krishna Rallabandi; Sunayana Sitaram; Alan W Black&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Speech Technology for Code-Switching in Multilingual Communities<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              
<\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4245\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4245\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Search, Computational Strategies and Language Modeling\"\n                data-time=\"11:00-13:00\"\n                data-room=\"Poster 4\"\n                data-room-id=\"1068\"\n                data-room-name=\"Poster 4\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Gy\u00f6rgy Szasz\u00e1k&lt;br&gt;&lt;br&gt;11.00-13.00 - A phonological phrase sequence modelling approach for resource efficient and robust real-time punctuation recovery&lt;br&gt;&lt;small&gt;Anna Mor\u00f3; Gy\u00f6rgy Szasz\u00e1k&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Binary Deep Neural Networks for Speech Recognition&lt;br&gt;&lt;small&gt;Xu Xiang; Yanmin Qian; Kai Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Comparison of Different Decoding Strategies for CTC Acoustic Models&lt;br&gt;&lt;small&gt;Thomas Zenkel; Ramon Sanabria; Florian Metze; Jan Niehues; Matthias Sperber; Sebastian St\u00fcker; Alex Waibel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Empirical Evaluation of Parallel Training Algorithms on Acoustic Modeling&lt;br&gt;&lt;small&gt;Wenpeng Li; Binbin Zhang; Lei Xie; Dong Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Estimation of Gap Between Current Language Models and Human Performance&lt;br&gt;&lt;small&gt;Xiaoyu Shen; Youssef Oualil; Clayton Greenberg; Mittul Singh; Dietrich Klakow&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Hierarchical Constrained Bayesian Optimization for Joint Feature, Acoustic 
Model and Decoder Parameter Optimization&lt;br&gt;&lt;small&gt;Akshay Chandrashekaran; Ian Lane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Joint Learning of Correlated Sequence Labeling Tasks Using Bidirectional Recurrent Neural Networks&lt;br&gt;&lt;small&gt;Vardaan Pahuja; Anirban Laha; Shachar Mirkin; Vikas Raykar; Lili Kotlerman; Guy Lev&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Phone duration modeling for LVCSR using neural networks&lt;br&gt;&lt;small&gt;Hossein Hadian; Daniel Povey; Hossein Sameti; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Rescoring-aware Beam Search for Reduced Search Errors in Contextual Automatic Speech Recognition&lt;br&gt;&lt;small&gt;Ian Williams; Aleksic Google, Inc&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Towards better decoding and language model integration in sequence to sequence models&lt;br&gt;&lt;small&gt;Jan Chorowski; Navdeep Jaitly&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Use of Global and Acoustic Features Associated with Contextual Factors to Adapt Language Models for Spontaneous Speech Recognition&lt;br&gt;&lt;small&gt;Shohei Toyama; Daisuke Saito; Nobuaki Minematsu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Architecture, Search, and Linguistic Components\"\n                data-category-ids=\"1061\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Search, Computational Strategies and Language Modeling<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               
<\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4243\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4243\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Analysis and Representation 2\"\n                data-time=\"11:00-13:00\"\n                data-room=\"Poster 1\"\n                data-room-id=\"1061\"\n                data-room-name=\"Poster 1\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Sekhar Seelamantula&lt;br&gt;&lt;br&gt;11.00-13.00 - A modulation property of time-frequency derivatives of filtered phase and its application to aperiodicity and FO estimation&lt;br&gt;&lt;small&gt;Hideki Kawahara; Ken-Ichi Sakakibara; Masanori Morise; Hideki Banno; Tomoki Toda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Analytic Filter Bank for Speech Analysis, Feature Extraction and Perceptual Studies&lt;br&gt;&lt;small&gt;Unto K. Laine&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - A Time-Warping Pitch Tracking Algorithm considering fast f0 changes&lt;br&gt;&lt;small&gt;Simon Stone; Peter Steiner; Peter Birkholz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Learning the mapping function from voltage amplitudes to sensor positions in 3D-EMA using deep neural networks&lt;br&gt;&lt;small&gt;Christian Kroos; Mark D. 
Plumbley&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Low-dimensional representation of spectral envelope without deterioration for full-band speech analysis\/synthesis system&lt;br&gt;&lt;small&gt;Masanori Morise; Kenji Ozawa; Genta Miayashita&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Non-Local Estimation of Speech Signal for Vowel Onset Point Detection in Varied Environments&lt;br&gt;&lt;small&gt;Avinash Kumar; Syed Shahnawazuddin; Gayadhar Pradhan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Robust Source-Filter Separation of Speech Signal in the Phase Domain&lt;br&gt;&lt;small&gt;Erfan Loweimi; Jon Barker; Oscar Saz Torralba; Thomas Hain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Time-domain envelope modulating the noise component of excitation in a continuous residual-based vocoder for statistical parametric speech synthesis&lt;br&gt;&lt;small&gt;Mohammed Salah Al-Radhi; Tam\u00e1s G\u00e1bor Csap\u00f3; G\u00e9za N\u00e9meth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Wavelet Speech Enhancement Based on Robust Principal Component Analysis&lt;br&gt;&lt;small&gt;Chia-Lung Wu; Hsiang-Ping Hsu; Syu-Siang Wang; Jeih-weih Hung; Ying-Hui Lai; Hsin-Min Wang; Yu Tsao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Vowel Onset Point Detection using Sonority Information&lt;br&gt;&lt;small&gt;Bidisha Sharma; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Speech and Audio Signals\"\n                data-category-ids=\"1062\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Analysis and Representation 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 1<\/i>\n               <\/div>\n\n               
<div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4233\">\n\n              <div class='time-header' style='visibility: hidden;'>11:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4233\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Conversational Telephone Speech Recognition\"\n                data-time=\"11:00-13:00\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Penny Karanasou; Ralf Schl\u00fcter&lt;br&gt;&lt;br&gt;11.00-11.20 - Improved Single System Conversational Telephone Speech Recognition with VGG Bottleneck Features&lt;br&gt;&lt;small&gt;William Hartmann; Roger Hsiao; Tim Ng; Jeff Ma; Francis Keith; Man-hung Siu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Student-teacher training with diverse decision tree ensembles&lt;br&gt;&lt;small&gt;Jeremy H. M. 
Wong; Mark Gales&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Embedding-Based Speaker Adaptive Training of Deep Neural Networks&lt;br&gt;&lt;small&gt;Xiaodong Cui; Vaibhava Goel; George Saon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - Improving Deliverable Speech-to-text Systems with Multilingual Knowledge Transfer&lt;br&gt;&lt;small&gt;Jeff Ma; Francis Keith; Owen Kimball; Man-hung Siu; Tim Ng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - English Conversational Telephone Speech Recognition by Humans and Machines&lt;br&gt;&lt;small&gt;George Saon; Gakuto Kurata; Tom Sercu; Kartik Audhkhasi; Samuel Thomas; Dimitrios Dimitriadis; Xiaodong Cui; Bhuvana Ramabhadran; Michael Picheny; Lynn-Li Lim; Bergul Roomi; Phil Hall&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Comparing Human and Machine Errors in Conversational Speech Transcription&lt;br&gt;&lt;small&gt;Andreas Stolcke; Jasha Droppo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Conversational Telephone Speech Recognition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"11:00\">11:00-13:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5255\">\n\n              <div class='time-header'>13:00<\/div>\n\n              <div class=\"box\"\n                
data-id=\"5255\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Lunch\"\n                data-time=\"13:00-14:30\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Lunch<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:00\">13:00-14:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4242\">\n\n              <div class='time-header'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4242\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Perception of Dialects and L2\"\n                data-time=\"14:30-16:30\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Marija Tabain; 
Felicitas Kleber&lt;br&gt;&lt;br&gt;14.30-14.50 - End-to-End Acoustic Feedback in Language Learning for Correcting Devoiced French Final-Fricatives&lt;br&gt;&lt;small&gt;Sucheta Ghosh; Camille Fauth; Yves Laprie; Aghilas Sini&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Dialect perception by older children&lt;br&gt;&lt;small&gt;Ewa Jacewicz; Robert A. Fox&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Perception of non-contrastive variations in American English by Japanese learners: Flaps are less favored than stops&lt;br&gt;&lt;small&gt;Kiyoko Yoneyama; Mafuyu Kitahara; Keiichi Tajima&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - How L1 speakers perceive L2 prosody: The cumulative effect of intonation, rhythm, and speech rate on accentedness and comprehensibility ratings&lt;br&gt;&lt;small&gt;Lieke van Maastricht; Tim Zee; Emiel Krahmer; Marc Swerts&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Effects of Pitch Fall and L1 on Vowel Length Identification in L2 Japanese&lt;br&gt;&lt;small&gt;IZUMI TAKIGUCHI&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - A Preliminary Study of Prosodic Disambiguation by Chinese EFL Learners&lt;br&gt;&lt;small&gt;Yuanyuan Zhang; Hongwei Ding&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Perception of Dialects and L2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n           
  <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4249\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4249\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Prosody and Text Processing\"\n                data-time=\"14:30-16:30\"\n                data-room=\"Poster 4\"\n                data-room-id=\"1068\"\n                data-room-name=\"Poster 4\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Zofia Malisz&lt;br&gt;&lt;br&gt;14.30-16.30 - An RNN Model of Text Normalization&lt;br&gt;&lt;small&gt;Richard Sproat; Navdeep Jaitly&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Comparison of Modeling Target in LSTM-RNN Duration Model&lt;br&gt;&lt;small&gt;Bo Chen; Jiahao Lai; Kai Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Discrete Duration Model For Speech Synthesis&lt;br&gt;&lt;small&gt;Bo Chen; Tianling Bian; Kai Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Global Syllable Vectors for Building TTS Front-End with Deep Learning&lt;br&gt;&lt;small&gt;Jinfu Ni; Yoshinori Shiga; Hisashi Kawai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Investigating Efficient Feature Representation Methods and Training Objective for BLSTM-Based Phone Duration Prediction&lt;br&gt;&lt;small&gt;Yibin Zheng; Jianhua Tao; Zhengqi Wen; Ya Li; Bin Liu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Learning word vector representations based on acoustic counts&lt;br&gt;&lt;small&gt;Manuel Sam Ribeiro; Oliver Watts; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Multi-Task Learning for Prosodic Structure Generation using BLSTM RNN with Structured Output 
Layer&lt;br&gt;&lt;small&gt;Yuchen Huang; Zhiyong Wu; Runnan Li; Helen Meng; Lianhong Cai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Prosody Aware Word-level Encoder Based on BLSTM-RNNs for DNN-based Speech Synthesis&lt;br&gt;&lt;small&gt;Yusuke Ijima; Nobukatsu Hojo; Ryo Masumura; Taichi Asami&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Prosody Control of Utterance Sequence for Information Delivering&lt;br&gt;&lt;small&gt;Ishin Fukuoka; Kazuhiko Iwata; Tetsunori Kobayashi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Synthesising uncertainty: the interplay of vocal effort and hesitation disfluencies&lt;br&gt;&lt;small&gt;Eva Szekely; Joseph Mendelson; Joakim Gustafson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Weakly-Supervised Phrase Assignment from Text in a Speech-Synthesis System Using Noisy Labels&lt;br&gt;&lt;small&gt;Asaf Rendel; Raul Fernandez; Zvi Kons; Andrew Rosenberg; Ron Hoory; Bhuvana Ramabhadran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Synthesis and Spoken Language Generation\"\n                data-category-ids=\"1059\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Prosody and Text Processing<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4248\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n       
         data-id=\"4248\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Multi-lingual Models and Adaptation for ASR\"\n                data-time=\"14:30-16:30\"\n                data-room=\"Poster 3\"\n                data-room-id=\"1069\"\n                data-room-name=\"Poster 3\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Khe Chai Sim&lt;br&gt;&lt;br&gt;14.30-16.30 - 2016 BUT Babel system: Multilingual BLSTM acoustic model with i-vector based adaptation&lt;br&gt;&lt;small&gt;Martin Karafiat; Murali Karthick Baskar; Pavel Matejka; Karel Vesely; Frantisek Grezl; Lukas Burget; Jan \u010cernock\u00fd&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - An Investigation of Deep Neural Networks for Multilingual Speech Recognition Training and Adaptation&lt;br&gt;&lt;small&gt;Sibo Tong; Philip N. 
Garner; Herve Bourlard&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - CTC Training of Multi-Phone Acoustic Models for Speech Recognition&lt;br&gt;&lt;small&gt;Olivier Siohan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Deep Least Squares Regression for Speaker Adaptation&lt;br&gt;&lt;small&gt;Younggwan Kim; Hyungjun Lim; Jahyun Goo; Hoirin Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Factorised representations for neural network adaptation to diverse acoustic environments&lt;br&gt;&lt;small&gt;Joachim Fainberg; Steve Renals; Peter Bell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Generalized Distillation Framework For Speaker Normalization&lt;br&gt;&lt;small&gt;Neethu Mariam Joy; Sandeep Reddy Kothinti; Srinivasan Umesh; Basil Abraham&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Learning Factorized Transforms for Unsupervised Adaptation of LSTM-RNN Acoustic Models&lt;br&gt;&lt;small&gt;Lahiru Samarakoon; Brian Mak; Khe Chai Sim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Multilingual Recurrent Neural Networks with Residual Learning for Low-Resource Speech Recognition&lt;br&gt;&lt;small&gt;Shiyu Zhou; Yuanyuan Zhao; Shuang Xu; Bo Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Multi-task Learning using Mismatched Transcription for Under-resourced Speech Recognition&lt;br&gt;&lt;small&gt;Van Hai Do; Nancy F. 
Chen; Boon Pang Lim; Mark Hasegawa-Johnson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - OPTIMIZING DNN ADAPTATION FOR RECOGNITION OF ENHANCED SPEECH&lt;br&gt;&lt;small&gt;Marco Matassoni; Alessio Brutti; Falavigna Daniele&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Multi-lingual Models and Adaptation for ASR<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4247\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4247\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Production and Perception\"\n                data-time=\"14:30-16:30\"\n                data-room=\"Poster 2\"\n                data-room-id=\"1067\"\n                data-room-name=\"Poster 2\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Wentao Gu&lt;br&gt;&lt;br&gt;14.30-16.30 - Accurate Synchronization of Speech and EGG signal using Phase 
Information&lt;br&gt;&lt;small&gt;Sunil Kumar S B; K Sreenivasa Rao; Tanumay Mandal&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - An objective critical distance measure based on the relative level of spectral valley&lt;br&gt;&lt;small&gt;Ananthapadmanabha T V; Ramakrishnan Angarai Ganesan; Shubham Sharma&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Audiovisual recalibration of vowel categories&lt;br&gt;&lt;small&gt;Matthias Franken; Frank Eisner; Jan-Mathijs Schoffelen; Dan Acheson; Peter Hagoort; James McQueen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Auditory-visual integration of talker gender in Cantonese tone perception&lt;br&gt;&lt;small&gt;Wei Lai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Critical articulators identification from RT-MRI of the vocal tract&lt;br&gt;&lt;small&gt;Samuel Silva; Ant\u00f3nio Teixeira&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Cross-modal Analysis between Phonation Differences and Texture Images based on Sentiment Correlations&lt;br&gt;&lt;small&gt;Win Thuzar Kyaw; Yoshinori Sagisaka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Database of volumetric and real-time vocal tract MRI for speech science&lt;br&gt;&lt;small&gt;Tanner Sorensen; Zisis Iason Skordilis; Asterios Toutios; Yoon-Chul Kim; Yinghua Zhu; Jangwon Kim; Adam Lammert; Vikram Ramanarayanan; Louis Goldstein; Dani Byrd; Krishna Nayak; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Event-related potentials associated with somatosensory effect in audio-visual speech perception&lt;br&gt;&lt;small&gt;Takayuki Ito; Hiroki Ohashi; Eva Montas; Vincent Gracco&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Semantic Edge Detection for Tracking Vocal Tract Air-tissue Boundaries in Real-time Magnetic Resonance Images&lt;br&gt;&lt;small&gt;Krishna Somandepalli; Asterios Toutios; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The acquisition of focal lengthening in Stockholm Swedish&lt;br&gt;&lt;small&gt;Anna Sara 
Hexeberg Rom\u00f8ren; Aoju Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The effect of gesture on persuasive speech&lt;br&gt;&lt;small&gt;Judith Peters; Marieke Hoetjes&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The Influence on Realization and Perception of Lexical Tones from Affricate's Aspiration&lt;br&gt;&lt;small&gt;Chong Cao; Yanlu Xie; Qi Zhang; Jinsong Zhang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - When a dog is a cat and how it changes your pupil size: Pupil dilation in response to information mismatch&lt;br&gt;&lt;small&gt;Lena F. Renner; Marcin Wlodarczak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Video-based tracking of jaw movements during speech: Preliminary results and future directions&lt;br&gt;&lt;small&gt;Andrea Bandini; Aravind Namasivayam; Yana Yunusova&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Wireless neck-surface accelerometer and microphone on flex circuit with application to noise-robust monitoring of Lombard speech&lt;br&gt;&lt;small&gt;Daryush Mehta; Patrick Chwalek; Thomas Quatieri; Laura Brattain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Vocal Tract Airway Tissue Boundary Tracking for rtMRI using Shape and Appearance Priors&lt;br&gt;&lt;small&gt;Sasan Asadiabadi; Engin Erzin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Production and Perception<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - 
Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4238\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4238\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Neural Networks for Language Modeling\"\n                data-time=\"14:30-16:30\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tanel Alum\u00e4e; Xunying Liu&lt;br&gt;&lt;br&gt;14.30-14.50 - Approaches for Neural-Network Language Model Adaptation&lt;br&gt;&lt;small&gt;Min Ma; Michael Nirschl; Fadi Biadsy; Shankar Kumar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - A Batch Noise Contrastive Estimation Approach for Training Large Vocabulary Language Models&lt;br&gt;&lt;small&gt;Youssef Oualil; Dietrich Klakow&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Investigating Bidirectional Recurrent Neural Network Language Models for Speech Recognition&lt;br&gt;&lt;small&gt;Xie Chen; Anton Ragni; Xunying Liu; Mark Gales&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - FAST NEURAL NETWORK LANGUAGE MODEL LOOKUPS AT N-GRAM SPEEDS&lt;br&gt;&lt;small&gt;Yinghui Huang; Abhinav Sethy; Bhuvana Ramabhadran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Empirical Exploration of Novel Architectures and Objectives for Language Models&lt;br&gt;&lt;small&gt;Gakuto Kurata; Abhinav Sethy; Bhuvana Ramabhadran; George Saon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - Residual Memory Networks in Language Modeling: Improving the Reputation of 
Feed-Forward Networks&lt;br&gt;&lt;small&gt;Karel Bene\u0161; Murali Baskar; Luk\u00e1\u0161 Burget&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Architecture, Search, and Linguistic Components\"\n                data-category-ids=\"1061\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Neural Networks for Language Modeling<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4246\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4246\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Perception\"\n                data-time=\"14:30-16:30\"\n                data-room=\"Poster 1\"\n                data-room-id=\"1061\"\n                data-room-name=\"Poster 1\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Louis ten Bosch&lt;br&gt;&lt;br&gt;14.30-16.30 - Emotional thin-slicing: a proposal for a short- and long-term division of emotional speech&lt;br&gt;&lt;small&gt;Daniel Oliveira Peres; Dominic Watt; Waldemar Ferreira Netto&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - 
Factors Affecting the Intelligibility of Low-pass Filtered Speech&lt;br&gt;&lt;small&gt;Lei Wang; Fei Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Lexically Guided Perceptual Learning in Mandarin Chinese&lt;br&gt;&lt;small&gt;L. Ann Burchfield; San-hei Kenny Luk; Mark Antoniou; Anne Cutler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Misperceptions of the emotional content of natural and vocoded speech in a car&lt;br&gt;&lt;small&gt;Jaime Lorenzo-Trueba; Cassia Valentini-Botinhao; Gustav Eje Henter; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Perception and acoustics of vowel nasality in Brazilian Portuguese&lt;br&gt;&lt;small&gt;Luciana Marques; Rebecca Scarborough&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Phonetic Restoration of Temporally Reversed Speech&lt;br&gt;&lt;small&gt;Shi-yu Wang; Fei Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Predicting epenthetic vowel quality from acoustics&lt;br&gt;&lt;small&gt;Adriana Guevara-Rukoz; Erika Parlato-Oliveira; Shi Yu; Yuki Hirose; Sharon Peperkamp; Emmanuel Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Simultaneous articulatory and acoustic distortion in L1 and L2 Listening: Locally time-reversed \u201cfast\u201d speech&lt;br&gt;&lt;small&gt;Mako Ishida&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Sociophonetic realizations guide subsequent lexical access&lt;br&gt;&lt;small&gt;Jonny Kim; Katie Drager&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The effect of spectral profile on the intelligibility of emotional speech in noise&lt;br&gt;&lt;small&gt;Chris Davis; Chee Seng Chong; Jeesun Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The effect of spectral tilt on size discrimination of voiced speech sounds&lt;br&gt;&lt;small&gt;Toshie Matsui; Toshio Irino; Kodai Yamamoto; Hideki Kawahara; Roy Patterson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The relative cueing power of F0 and duration in German prominence perception&lt;br&gt;&lt;small&gt;Oliver 
Niebuhr; Jana Winkler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Whether long-term tracking of speech rate affects perception depends on who is talking&lt;br&gt;&lt;small&gt;Merel Maslowski; Antje S. Meyer; Hans Rutger Bosker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Perception<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 1<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4253\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4253\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 2\"\n                data-time=\"14:30-16:30\"\n                data-room=\"E397\"\n                data-room-id=\"1071\"\n                data-room-name=\"E397\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"14.30-16.30 - An apparatus to investigate western opera singing skill learning using performance and result biofeedback, and measuring its neural correlates&lt;br&gt;&lt;small&gt;Aurore Jaumard-Hakoun; Samy Chikhi; Takfarinas 
Medani; Angelika Nair; G\u00e9rard Dreyfus; Fran\u00e7ois-Beno\u00eet Vialatte&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Emojive! Collecting Emotion Data from Speech and Facial Expression using Mobile Game App&lt;br&gt;&lt;small&gt;Ji Ho Park; Nayeon Lee; Dario Bertero; Anik Dey; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - PercyConfigurator -- Perception Experiments as a Service&lt;br&gt;&lt;small&gt;Christoph Draxler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - System for speech transcription and post-editing in Microsoft Word&lt;br&gt;&lt;small&gt;Askars Salimbajevs; Indra Ikauniece&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Visual Learning 2: Pronunciation app using ultrasound, video, and MRI&lt;br&gt;&lt;small&gt;Kyori Suzuki; Ian Wilson; Hayato Watanabe&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E397<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4241\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4241\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Analysis and Representation 1\"\n                data-time=\"14:30-16:30\"\n              
  data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hema Murthy; Jon Barker&lt;br&gt;&lt;br&gt;14.30-14.50 - Phone Classification using a Non-Linear Manifold with Broad Phone Class Dependent DNNs&lt;br&gt;&lt;small&gt;Linxue Bai; Peter Jancovic; Martin Russell; Philip Weber; Steve Houghton&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - An Investigation of Crowd Speech for Room Occupancy Estimation&lt;br&gt;&lt;small&gt;Siyuan Chen; Julien Epps; Eliathamby Ambikairajah; Phu Le&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Time-Frequency Coherence for Periodic-Aperiodic Decomposition of Speech Signals&lt;br&gt;&lt;small&gt;Karthika Vijayan; Jitendra Dhiman; Chandra Sekhar Seelamantula&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - Musical Speech: a New Methodology for Transcribing Speech Prosody&lt;br&gt;&lt;small&gt;Alexsandro Meireles; Ant\u00f4nio Sim\u00f5es; Antonio Celso Ribeiro; Beatriz Raposo de Medeiros&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Estimation of Place of Articulation of Fricatives from Spectral Characteristics for Speech Training&lt;br&gt;&lt;small&gt;K S Nataraj; Prem C. 
Pandey; Hirak Dasgupta&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - Estimation of the Probability Distribution of Spectral Fine Structure in the Speech Source&lt;br&gt;&lt;small&gt;Tom B\u00e4ckstr\u00f6m&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Speech and Audio Signals\"\n                data-category-ids=\"1062\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Analysis and Representation 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4257\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4257\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Interspeech 2017 Automatic Speaker Verification Spoofing and Countermeasures Challenge 2\"\n                data-time=\"14:30-16:30\"\n                data-room=\"D8\"\n                data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Nicholas Evans; Kong Aik Lee&lt;br&gt;&lt;br&gt;14.30-14.45 - Audio replay attack detection with deep learning 
frameworks&lt;br&gt;&lt;small&gt;Galina Lavrentyeva; Sergey Novoselov; Egor Malykh; Alexandr Kozlov; Oleg Kudashev; Vadim Shchemelinin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.45-15.00 - Ensemble learning for countermeasure of audio replay spoofing attack in ASVspoof2017&lt;br&gt;&lt;small&gt;Zhe Ji; Zhi-Yi Li; Peng Li; Maobo An; Shengxiang Gao; Dan Wu; Faru Zhao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.00-15.15 - A Study on Replay Attack and Anti-Spoofing for Automatic Speaker Verification&lt;br&gt;&lt;small&gt;Lantian Li; Yixiang Chen; Dong Wang; Thomas Fang Zheng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.15-15.30 - Replay Attack Detection using DNN for Channel Discrimination&lt;br&gt;&lt;small&gt;Parav Nagarsheth; Elie Khoury; Kailash Patil; Matt Garland&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.45 - ResNet and Model Fusion for Automatic Spoofing Detection&lt;br&gt;&lt;small&gt;Zhuxin Chen; Zhifeng Xie; Weibin Zhang; Xiangmin Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.45-16.00 - SFF Anti-Spoofer: IIIT-H Submission for Automatic Speaker Verification Spoofing and Countermeasures Challenge 2017&lt;br&gt;&lt;small&gt;K N R K Raju Alluri; Sivanand Achanta; Sudarsana Reddy Kadiri; Suryakanth V Gangashetty; Anil Kumar Vuppala&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-16.30 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Interspeech 2017 Automatic Speaker Verification Spoofing and Countermeasures Challenge 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n             
  <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5241\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"5241\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Speech Technology for Code-Switching in Multilingual Communities\"\n                data-time=\"14:30-16:30\"\n                data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Alan W Black; Kalika Bali&lt;br&gt;&lt;br&gt;&lt;p&gt;See full description at: https:\/\/www.microsoft.com\/en-us\/research\/event\/interspeech-2017-special-session-speech-technologies-for-code-switching-in-multilingual-communities\/#&lt;\/p&gt;&lt;p&gt;Topics of interest for this special session will include but are not limited to:&amp;nbsp;&lt;br&gt;&lt;\/p&gt;&lt;li&gt;Speech Recognition of code-switched speech&lt;\/li&gt;&lt;li&gt;Language Modeling for code-switched speech&lt;\/li&gt;&lt;li&gt;Speech Synthesis of code-switched text&lt;\/li&gt;&lt;li&gt;Speech Translation of code-switched languages&lt;\/li&gt;&lt;li&gt;Spoken Dialogue Systems that can handle code-switching&lt;\/li&gt;&lt;li&gt;Speech data and resources for code-switching&lt;\/li&gt;&lt;li&gt;Language Identification from speech&lt;\/li&gt;&lt;p&gt;&lt;\/p&gt;&lt;p&gt;&lt;br&gt;&lt;\/p&gt;&lt;p&gt;Organizing Committee:&lt;br&gt;&lt;\/p&gt;&lt;p&gt;Kalika Bali, Microsoft Research India&lt;br&gt;Alan W 
Black, Carnegie Mellon University&lt;br&gt;Mona Diab, George Washington University&lt;br&gt;Julia Hirschberg, Columbia University&lt;br&gt;Sunayana Sitaram, Microsoft Research India&lt;br&gt;Thamar Solorio, University of Houston&lt;\/p&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Addressing Code-Switching in French\/Algerian Arabic Speech&lt;br&gt;&lt;small&gt;Amazouz Djegdjiga; Martine Adda-Decker; Lori Lamel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Metrics for modeling code-switching across corpora&lt;br&gt;&lt;small&gt;Wally Guzman; Joseph Ricard; Jacqueline Serigos; Barbara Bullock; Almeida Jacqueline Toribio&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Synthesising isiZulu-English code-switch bigrams using word embeddings&lt;br&gt;&lt;small&gt;Ewald Van der westhuizen; Thomas Niesler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - Crowdsourcing Universal Part-Of-Speech Tags for Code-Switching&lt;br&gt;&lt;small&gt;Victor Soto; Julia Hirschberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.30 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"\"\n                data-category-ids=\"\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Speech Technology for Code-Switching in Multilingual Communities<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4239\">\n\n              <div 
class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4239\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Far-field Speech Recognition\"\n                data-time=\"14:30-16:30\"\n                data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Thomas Hain; Zheng-Hua Tan&lt;br&gt;&lt;br&gt;14.30-14.50 - Generation of simulated utterances in virtual rooms to train deep-neural networks for far-field speech recognition in Google Home&lt;br&gt;&lt;small&gt;Chanwoo Kim; Ananya Misra; K.K. Chin; Thad Hughes; Arun Narayanan; Tara Sainath; Michiel Bacchiani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Neural network-based spectrum estimation for online WPE dereverberation&lt;br&gt;&lt;small&gt;Keisuke Kinoshita; Marc Delcroix; Haeyong Kwon; Takuma Mori; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Factorial Modeling for Effective Suppression of Directional Noise&lt;br&gt;&lt;small&gt;Osamu Ichikawa; Takashi Fukuda; Gakuto Kurata; Steven Rennie&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - On Design of Robust Deep Models for CHiME-4 Multi-Channel Speech Recognition with Multiple Configurations of Array Microphones&lt;br&gt;&lt;small&gt;Yan-Hui Tu; Jun Du; Lei Sun; Feng Ma; Chin-Hui Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Acoustic Modeling for Google Home&lt;br&gt;&lt;small&gt;Bo Li; Tara Sainath; Joe Caroselli; Arun Narayanan; Michiel Bacchiani; Ananya Misra; Izhak Shafran; Hasim Sak; Golan Pundak; K.K. 
Chin; Khe Chai Sim; Ron Weiss; Kevin Wilson; Ehsan Variani; Chanwoo Kim; Olivier Siohan; Mitchell Weintraub; Erik McDermott; Richard Rose; Matt Shannon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - On multi-domain training and adaptation of end-to-end RNN acoustic models for distant speech recognition&lt;br&gt;&lt;small&gt;Seyedmahdad Mirsamadi; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Far-field Speech Recognition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4254\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4254\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Swedish Kulning (SweKul). 
What\\'s so Special About Kulning - The Singing Technique in Traditional Swedish Cattle Calls?\"\n                data-time=\"14:30-16:30\"\n                data-room=\"B3\"\n                data-room-id=\"1072\"\n                data-room-name=\"B3\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4254.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Special event\"\n                data-category-ids=\"1064\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Swedish Kulning (SweKul). What's so Special About Kulning - The Singing Technique in Traditional Swedish Cattle Calls?<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4240\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4240\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Pathological Speech and Language\"\n                data-time=\"14:30-16:30\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n        
        data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Heidi Christensen; Rafa Orozco&lt;br&gt;&lt;br&gt;14.30-14.50 - Dominant Distortion Classification for Pre-Processing of Vowels in Remote Biomedical Voice Analysis&lt;br&gt;&lt;small&gt;Amir Hossein Poorjam; Jesper Rindom Jensen; Max A. Little; Mads Gr\u00e6sb\u00f8ll Christensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Automatic Paraphasia Detection from Aphasic Speech: A Preliminary Study&lt;br&gt;&lt;small&gt;Duc Le; Keli Licata; Emily Mower Provost&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Evaluation of the neurological state of people with Parkinson\u2019s disease using i-vectors&lt;br&gt;&lt;small&gt;Nicanor Garcia; Juan Rafael Orozco-Arroyave; Luis Fernando D'Haro; Najim Dehak; Elmar Noeth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - Objective Severity Assessment From Disordered Voice Using Estimated Glottal Airflow&lt;br&gt;&lt;small&gt;Yu-Ren Chien; Michal Borsky; Jon Gudnason&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Earlier Identification of Children with Autism Spectrum Disorder: An Automatic Vocalisation-based Approach&lt;br&gt;&lt;small&gt;Florian Pokorny; Bj\u00f6rn Schuller; Peter Marschik; Raymond Brueckner; P\u00e4r Nystr\u00f6m; Nicholas Cummins; Sven B\u00f6lte; Christa Einspieler; Terje Falck-Ytter&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - Convolutional Neural Network to Model Articulation Impairments in Patients with Parkinson's Disease&lt;br&gt;&lt;small&gt;Juan Camilo V\u00e1squez Correa; Juan Rafael Orozco-Arroyave; Elmar Noeth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                       
         <span>Pathological Speech and Language<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4252\">\n\n              <div class='time-header' style='visibility: hidden;'>14:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4252\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 1\"\n                data-time=\"14:30-16:30\"\n                data-room=\"E306\"\n                data-room-id=\"1070\"\n                data-room-name=\"E306\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"14.30-16.30 - ChunkitApp: Investigating the relevant units of online speech processing&lt;br&gt;&lt;small&gt;Svetlana Vetchinnikova; Anna Mauranen; Nina Mikusova&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Extending the EMU Speech Database Management System: Cloud Hosting, Team Collaboration, Automatic Revision Control&lt;br&gt;&lt;small&gt;Markus Jochim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - HomeBank: A repository for long-form real-world audio recordings of children&lt;br&gt;&lt;small&gt;Anne Warlaumont; Mark vanDam; Elika Bergelson; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - MoPAReST - Mobile Phone Assisted Remote Speech Therapy Platform&lt;br&gt;&lt;small&gt;Chitralekha Bhat; Anjali Kant; Bhavik Vachhani; Sarita Rautara; Ashok Kumar Sinha; Sunil Kumar 
Kopparapu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Prosograph: A Tool for Prosody Visualisation of Large Speech Corpora&lt;br&gt;&lt;small&gt;Alp Oktem; Mireia Farr\u00fas; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E306<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"14:30\">14:30-16:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5249\">\n\n              <div class='time-header'>16:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"5249\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"ISCA General Assembly and Refreshments\"\n                data-time=\"16:30-18:00\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n          
                      <span>ISCA General Assembly and Refreshments<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:30\">16:30-18:00 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5250\">\n\n              <div class='time-header'>19:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5250\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Welcome Reception\"\n                data-time=\"19:00-20:30\"\n                data-room=\"Stockholm City Hall and Teaterbaren\"\n                data-room-id=\"1116\"\n                data-room-name=\"Stockholm City Hall and Teaterbaren\"\n                data-day=\"1\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Social event\"\n                data-category-ids=\"1067\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #C9EE91;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Welcome Reception<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Stockholm City Hall and Teaterbaren<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"19:00\">19:00-20:30 - Monday 21 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n    
      <div class='list_header' data-day='2' style='background-color: rgba(32, 132, 196, 0.83); color: rgba(255, 255, 255, 1);'>Tuesday 22 August<\/div>\n          <div class=\"box-wrapper\"\n              data-id=\"5246\">\n\n              <div class='time-header'>07:45<\/div>\n\n              <div class=\"box\"\n                data-id=\"5246\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Registration\"\n                data-time=\"07:45-17:00\"\n                data-room=\"S\u00f6dra Huset, House A\"\n                data-room-id=\"1112\"\n                data-room-name=\"S\u00f6dra Huset, House A\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"\"\n                data-category-ids=\"\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Registration<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">S\u00f6dra Huset, House A<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"07:45\">07:45-17:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4289\">\n\n              <div class='time-header'>08:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4289\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Keynote 1: James Allen, Dialogue as Collaborative Problem Solving\"\n            
    data-time=\"08:30-09:30\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4289.html\"\n                data-speaker=\"James Allen\"\n                data-speakercell=\"James Allen\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Joakim Gustafson&lt;br&gt;&lt;br&gt;\n                 &lt;span style=&quot;font-weight: normal;&quot;&gt;The session will also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;\/span&gt;\n                 &lt;br&gt;\n                &lt;br&gt;&lt;br&gt;\"\n                data-category=\"Keynote\"\n                data-category-ids=\"1057\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #72D9EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Keynote 1: James Allen, Dialogue as Collaborative Problem Solving<\/span>\n               <br>\n               <div class=\"lecturer\"><span>James Allen<\/span><\/div>\n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"08:30\">08:30-09:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5257\">\n\n              <div class='time-header'>09:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"5257\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Refreshments\"\n                data-time=\"09:30-10:00\"\n                data-room=\"Various locations\"\n                
data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Refreshments<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:30\">09:30-10:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4308\">\n\n              <div class='time-header'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4308\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Short Utterances Speaker Recognition\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Poster 1\"\n                data-room-id=\"1061\"\n                data-room-name=\"Poster 1\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Anthony Larcher&lt;br&gt;&lt;br&gt;10.00-12.00 - Adversarial Network Bottleneck Features for Noise Robust Speaker Verification&lt;br&gt;&lt;small&gt;Hong Yu; Zheng-Hua Tan; Zhanyu Ma; Jun 
Guo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Content Normalization for Text-dependent Speaker Verification&lt;br&gt;&lt;small&gt;Subhadeep Dey; Srikanth Madikeri; Petr Motlicek; Marc Ferras&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Deep Speaker Embeddings for Short-Duration Speaker Verification&lt;br&gt;&lt;small&gt;Gautam Bhattacharya; Md Jahangir Alam; Patrick Kenny&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - DNN i-vector Speaker Verification with Short, Text-constrained Test Utterances&lt;br&gt;&lt;small&gt;Jinghua Zhong; Wenping Hu; Frank Soong; Helen Meng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - End-to-End Text-Independent Speaker Verification with Triplet Loss on Short Utterances&lt;br&gt;&lt;small&gt;Chunlei Zhang; Kazuhito Koishida&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Gain Compensation for Fast I-Vector Extraction over Short Duration&lt;br&gt;&lt;small&gt;Kong Aik Lee; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Incorporating Local Acoustic Variability Information into Short Duration Speaker Verification&lt;br&gt;&lt;small&gt;Jianbo Ma; Vidhyasaharan Sethu; Eliathamby Ambikairajah; Kong Aik Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Joint Training of Expanded End-to-end DNN for Text-dependent Speaker Verification&lt;br&gt;&lt;small&gt;Hee-Soo Heo; Jee-Weon Jung; IL-Ho Yang; Sung-Hyun Yoon; Ha-Jin Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Time-Varying Autoregressions for Speaker Verification in Reverberant Conditions&lt;br&gt;&lt;small&gt;Ville Vestman; Dhananjaya Gowda; Md Sahidullah; Paavo Alku; Tomi Kinnunen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Using Voice Quality Features to Improve Short-Utterance, Text-Independent Speaker Verification Systems&lt;br&gt;&lt;small&gt;Soo Jin Park; Gary Yeung; Jody Kreiman; Patricia Keating; Abeer Alwan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - What Does the Speaker Embedding Encode?&lt;br&gt;&lt;small&gt;Shuai Wang; Yanmin Qian; Kai 
Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Short Utterances Speaker Recognition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 1<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4293\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4293\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speaker Recognition\"\n                data-time=\"10:00-12:00\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Jean-Francois Bonastre; Kornel Laskowski&lt;br&gt;&lt;br&gt;10.00-10.20 - Deep Neural Network Embeddings for Text-Independent Speaker Verification&lt;br&gt;&lt;small&gt;David Snyder; Daniel Garcia-Romero; Dan Povey; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Tied Variational Autoencoder Backends for i-Vector Speaker Recognition&lt;br&gt;&lt;small&gt;Jesus Villalba; Niko Brummer; Najim 
Dehak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Improved Gender Independent Speaker Recognition Using Convolutional Neural Network Based Bottleneck Features&lt;br&gt;&lt;small&gt;Shivesh Ranjan; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Autoencoder based Domain Adaptation for Speaker Recognition under Insufficient Channel Information&lt;br&gt;&lt;small&gt;Suwon Shon; Seongkyu Mun; Wooil Kim; Hanseok Ko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Nonparametrically Trained Probabilistic Linear Discriminant Analysis for i-Vector Speaker Verification&lt;br&gt;&lt;small&gt;Abbas Khosravani; Mohammad Mehdi Homayounpour&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - DNN bottleneck features for speaker clustering&lt;br&gt;&lt;small&gt;Jes\u00fas Jorr\u00edn; Leibny Paola Garcia Perera; Luis Buera&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speaker Recognition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4291\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4291\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Emotion 
Recognition\"\n                data-time=\"10:00-12:00\"\n                data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Elmar N\u00f6th; Shrikanth Narayanan&lt;br&gt;&lt;br&gt;10.00-10.20 - Efficient Emotion Recognition from Speech Using Deep Learning on Spectrograms&lt;br&gt;&lt;small&gt;Aharon Satt; Shai Rozenberg; Ron Hoory&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Interaction and Transition Model for Speech Emotion Recognition in Dialogue&lt;br&gt;&lt;small&gt;Ruo Zhang; Atsushi Ando; Satoshi Kobashikawa; Yushi Aono&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Progressive Neural Networks for Transfer Learning in Emotion Recognition&lt;br&gt;&lt;small&gt;John Gideon; Soheil Khorram; Zakaria Aldeneh; Dimitrios Dimitriadis; Emily Mower Provost&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Jointly Predicting Arousal, Valence and Dominance with Multi-Task Learning&lt;br&gt;&lt;small&gt;Srinivas Parthasarathy; Carlos Busso&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Discretized Continuous Speech Emotion Recognition with Multi-Task Deep Recurrent Neural Network&lt;br&gt;&lt;small&gt;Duc Le; Zakaria Aldeneh; Emily Mower Provost&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Towards Speech Emotion Recognition \u201cin the wild\u201d using Aggregated Corpora and Deep Multi-Task Learning&lt;br&gt;&lt;small&gt;Jaebok Kim; Gwenn Englebienne; Khiet Truong; Vanessa Evers&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: 
rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Emotion Recognition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4318\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4318\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 4\"\n                data-time=\"10:00-12:00\"\n                data-room=\"E397\"\n                data-room-id=\"1071\"\n                data-room-name=\"E397\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"10.00-12.00 - Combining Gaussian mixture models and segmental feature models for speaker recognition&lt;br&gt;&lt;small&gt;Milana Milo\u0161evi\u0107; Ulrike Glavitsch&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Did you laugh enough today? 
- Deep Neural Networks for Mobile and Wearable Laughter Trackers&lt;br&gt;&lt;small&gt;Gerhard Hagerer; Nicholas Cummins; Florian Eyben; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Evolving recurrent neural networks that process and classify raw audio in a streaming fashion&lt;br&gt;&lt;small&gt;Adrien DANIEL&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Low-Frequency Ultrasonic Communication for Speech Broadcasting in Public Transportation&lt;br&gt;&lt;small&gt;Kwang Myung Jeon; Nam Kyun Kim; Chan Woong Kwak; Jung Min Moon; Hong Kook Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Reading validation for pronunciation evaluation in the Digitala project&lt;br&gt;&lt;small&gt;Aku Rouhe; Reima Karhila; Peter Smit; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Real-time Speech Enhancement with GCC-NMF: Demonstration on the Raspberry Pi and NVIDIA Jetson&lt;br&gt;&lt;small&gt;Sean Wood; Jean Rouat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 4<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E397<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4324\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4324\"\n                
data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Speech and Human-Robot Interaction\"\n                data-time=\"10:00-12:00\"\n                data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;G\u00e9rard Bailly; Gabriel Skantze&lt;br&gt;&lt;br&gt;10.00-10.15 - Introduction&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.15-10.30 - Elicitation Design for Acoustic Depression Classification: An Investigation of Articulation Effort, Linguistic Complexity, and Word Affect&lt;br&gt;&lt;small&gt;Brian Stasak; Julien Epps; Roland Goecke&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.30-10.45 - Robustness over time-varying channels in DNN-HMM ASR based human-robot interaction&lt;br&gt;&lt;small&gt;Jose Novoa; Jorge Wuth; Juan Pablo Escudero; Josue Fredes; Rodrigo Mahu; Richard Stern; Nestor Becerra Yoma&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.45-11.00 - Analysis of Engagement and User Experience with a Laughter Responsive Social Robot&lt;br&gt;&lt;small&gt;Bekir Berker T\u00fcrker; Zana Bu\u00e7inca; Engin Erzin; Y\u00fccel Yemez; Metin Sezgin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.15 - Automatic Classification of Autistic Child Vocalisations: A Novel Database and Results&lt;br&gt;&lt;small&gt;Alice Baird; Shahin Amiriparian; Nicholas Cummins; Alyssa M. 
Alcorn; Anton Batliner; Sergey Pugachevskiy; Michael Freitag; Mauric Gerczuk; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.15-11.30 - Crowd-Sourced Design of Artificial Attentive Listeners&lt;br&gt;&lt;small&gt;Catharine Oertel; Patrik Jonell; Dimosthenis Kontogiorgos; Joseph Mendelson; Jonas Beskow; Joakim Gustafson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.30-11.45 - Studying the link between inter-speaker coordination and speech imitation through human-machine interactions&lt;br&gt;&lt;small&gt;Leonardo Lancia; Thierry Chaminade; No\u00ebl Nguyen; Laurent Pr\u00e9vot&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.45-12.00 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n                data-category-ids=\"1051\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Speech and Human-Robot Interaction<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4295\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4295\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Synthesis Prosody\"\n                data-time=\"10:00-12:00\"\n                data-room=\"D8\"\n                
data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mirjam Wester; Prasanta Ghosh&lt;br&gt;&lt;br&gt;10.00-10.20 - An RNN-based Quantized F0 Model with Multi-tier Feedback Links for Text-to-Speech Synthesis&lt;br&gt;&lt;small&gt;Xin Wang; Shinji Takaki; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Phrase break prediction for long-form reading TTS: exploiting the text structure information&lt;br&gt;&lt;small&gt;Viacheslav Klimkov; Adam Nadolski; Alexis Moinet; Bartosz Putrycz; Roberto Barra-Chicote; Thomas Merritt; Thomas Drugman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Physically constrained statistical F0 prediction for electrolaryngeal speech enhancement&lt;br&gt;&lt;small&gt;Kou Tanaka; Hirokazu Kameoka; Tomoki Toda; Satoshi Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - DNN-SPACE: DNN-HMM-based Generative Model of Voice $F_0$ Contours for Statistical Phrase\/Accent Command Estimation&lt;br&gt;&lt;small&gt;Nobukatsu Hojo; Ohsugi Yasuhito; Yusuke Ijima; Hirokazu Kameoka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Controlling prominence realisation in parametric DNN-based speech synthesis.&lt;br&gt;&lt;small&gt;Zofia Malisz; Harald Berthelsen; Jonas Beskow; Joakim Gustafson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Increasing Recall of Lengthening Detection via Semi-Automatic Classification&lt;br&gt;&lt;small&gt;Simon Betz; Jana Vo\u00dfe; Sina Zarrie\u00df; Petra Wagner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Synthesis and Spoken Language Generation\"\n                data-category-ids=\"1059\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: 
rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Synthesis Prosody<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4292\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4292\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Models of Speech Production\"\n                data-time=\"10:00-12:00\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Marcin Wlodarczak; Daryush Mehta&lt;br&gt;&lt;br&gt;10.00-10.20 - Functional principal component analysis of vocal tract area functions&lt;br&gt;&lt;small&gt;Jorge Lucero&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Analysis of acoustic-to-articulatory speech inversion across different accents and languages&lt;br&gt;&lt;small&gt;Ganesh Sivaraman; Carol Espy-Wilson; Martijn Wieling&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Integrated mechanical model of [r]-[l] and [b]-[m]-[w] producing consonant cluster [br]&lt;br&gt;&lt;small&gt;Takayuki Arai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - A Speaker Adaptive DNN Training Approach for Speaker-independent Acoustic Inversion&lt;br&gt;&lt;small&gt;Leonardo 
Badino; Luca Franceschi; Raman Arora; Michele Donini; Massimiliano Pontil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Acoustic-to-articulatory mapping based on mixture of probabilistic canonical correlation analysis&lt;br&gt;&lt;small&gt;Hidetsugu Uchida; Daisuke Saito; Nobuaki Minematsu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Test-retest repeatability of articulatory strategies using real-time magnetic resonance imaging&lt;br&gt;&lt;small&gt;Tanner Sorensen; Asterios Toutios; Johannes Toger; Louis Goldstein; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Models of Speech Production<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4317\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4317\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 3\"\n                data-time=\"10:00-12:00\"\n                data-room=\"E306\"\n                data-room-id=\"1070\"\n                data-room-name=\"E306\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n         
       data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"10.00-12.00 - Applications of the BBN Sage Speech Processing Platform&lt;br&gt;&lt;small&gt;Ralf Meermeier; Sean Colbath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - A Signal Processing Approach for Speaker Separation using SFF Analysis&lt;br&gt;&lt;small&gt;Nivedita Chennupati; Narayana Murthy BHVS; Bayya Yegnanarayana&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Bob Speaks Kaldi&lt;br&gt;&lt;small&gt;Milos Cernak; Alain Komaty; Amir Mohammadi; Andre Anjos; Sebastien Marcel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - MetaLab: A repository for meta-analyses on language development, and more&lt;br&gt;&lt;small&gt;Sho Tsuji; Christina Bergmann; Molly Lewis; Mika Braginsky; Page Piccinini; Michael C. Frank; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Real time pitch shifting with formant structure preservation using the phase vocoder&lt;br&gt;&lt;small&gt;Micha\u0142 Lenarczyk&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Speech Recognition and Understanding on Hardware-Accelerated DSP&lt;br&gt;&lt;small&gt;Georg Stemmer; Munir Georges; Joachim Hofer; Piotr Rozen; Josef Bauer; Jakub Nowicki; Tobias Bocklet; Hannah Colett; Ohad Falik; Michael Deisher; Sylvia Downing&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 3<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E306<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 
August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4309\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4309\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speaker Characterization and Recognition\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Poster 2\"\n                data-room-id=\"1067\"\n                data-room-name=\"Poster 2\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Michael Wagner&lt;br&gt;&lt;br&gt;10.00-12.00 - A Distribution Free Formulation of the Total Variability Model&lt;br&gt;&lt;small&gt;Ruchir Travadi; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Alternative Approaches to Neural Network based Speaker Verification&lt;br&gt;&lt;small&gt;Anna Silnova; Lukas Burget; Jan \u010cernock\u00fd&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Analysis of Score Normalization in Multilingual Speaker Recognition&lt;br&gt;&lt;small&gt;Pavel Matejka; Oldrich Plchot; Ond\u0159ej Novotn\u00fd; Lukas Burget; Mireia Diez S\u00e1nchez; Jan \u010cernock\u00fd&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Deep Speaker Feature Learning for Text-independent Speaker Verification&lt;br&gt;&lt;small&gt;Lantian Li; Yixiang Chen; Ying Shi; Zhiyuan Tang; Dong Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Domain mismatch modeling of out-domain i-vectors for PLDA speaker verification&lt;br&gt;&lt;small&gt;Md Hafizur Rahman; Ivan Himawan; David Dean; Sridha Sridharan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Duration mismatch compensation using four-covariance 
model and deep neural network for speaker verification&lt;br&gt;&lt;small&gt;Pierre-Michel Bousquet; Mickael Rouvier&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Extended Variability Modeling and Unsupervised Adaptation for PLDA Speaker Recognition&lt;br&gt;&lt;small&gt;Alan McCree; Greg Sell; Daniel Garcia-Romero&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Improving the Effectiveness of Speaker Verification Domain Adaptation With Inadequate In-Domain Data&lt;br&gt;&lt;small&gt;Jonas Borgstrom; Elliot Singer; Douglas Reynolds; Seyed Omid Sadjadi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - I-Vector DNN Scoring and Calibration for Noise Robust Speaker Verification&lt;br&gt;&lt;small&gt;Zhili Tan; Manwai Mak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Speaker Verification via Estimating Total Variability Space Using Probabilistic Partial Least Squares&lt;br&gt;&lt;small&gt;Chen Chen; Jiqing Han; Yilin Pan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speaker Characterization and Recognition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4290\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n               
 data-id=\"4290\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Neural Network Acoustic Models for ASR 1\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mathew Magimai-Doss; Jan \u010cernock\u00fd&lt;br&gt;&lt;br&gt;10.00-10.20 - A Comparison of Sequence-to-Sequence Models for Speech Recognition&lt;br&gt;&lt;small&gt;Rohit Prabhavalkar; Kanishka Rao; Tara Sainath; Bo Li; Leif Johnson; Navdeep Jaitly&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - CTC in the Context of Generalized Full-Sum HMM Training&lt;br&gt;&lt;small&gt;Albert Zeyer; Eugen Beck; Ralf Schl\u00fcter; Hermann Ney&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Advances in Joint CTC-Attention based End-to-End Speech Recognition with a Deep CNN Encoder and RNN-LM&lt;br&gt;&lt;small&gt;Takaaki Hori; Shinji Watanabe; Yu Zhang; William Chan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Multitask Learning with CTC and Segmental CRF for Speech Recognition&lt;br&gt;&lt;small&gt;Liang Lu; Lingpeng Kong; Chris Dyer; Noah Smith&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Direct Acoustics-to-Word Models for English Conversational Speech Recognition&lt;br&gt;&lt;small&gt;Kartik Audhkhasi; Bhuvana Ramabhadran; George Saon; Michael Picheny; David Nahamoo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Reducing the Computational Complexity of Two-Dimensional LSTMs&lt;br&gt;&lt;small&gt;Bo Li; Tara Sainath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                
data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Neural Network Acoustic Models for ASR 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4294\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4294\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Phonation and Voice Quality\"\n                data-time=\"10:00-12:00\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Peter Birkholz; Kikuo Maekawa&lt;br&gt;&lt;br&gt;10.00-10.20 - Creak as a feature of lexical stress in Estonian&lt;br&gt;&lt;small&gt;K\u00e4tlin Aare; P\u00e4rtel Lippus; Juraj \u0160imko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Cross-speaker Variation in Voice Source Correlates of Focus and Deaccentuation&lt;br&gt;&lt;small&gt;Irena Yanushevskaya; Ailbhe N\u00ed Chasaide; Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Acoustic Characterization of Word-final Glottal Stops in Mizo and Assam Sora&lt;br&gt;&lt;small&gt;Sishir Kalita; Wendy 
Lalhminghlui; Luke Horo; Priyankoo Sarmah; S R Mahadeva Prasanna; Samarendra Dandapat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Iterative Optimal Preemphasis for Improved Glottal-Flow Estimation by Iterative Adaptive Inverse Filtering&lt;br&gt;&lt;small&gt;Parham Mokhtari; Hiroshi Ando&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Automatic Measurement of Pre-aspiration&lt;br&gt;&lt;small&gt;Yaniv Sheena; Michaela Hejna; Yossi Adi; Joseph Keshet&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Acoustic and electroglottographic study of breathy and modal vowels as produced by heritage and native Gujarati speakers&lt;br&gt;&lt;small&gt;Kiranpreet Nara&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Phonation and Voice Quality<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5239\">\n\n              <div class='time-header'>12:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5239\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Lunch\"\n                data-time=\"12:00-13:30\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                
data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Lunch<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"12:00\">12:00-13:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4299\">\n\n              <div class='time-header'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4299\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Source Separation and Auditory Scene Analysis\"\n                data-time=\"13:30-15:30\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mahadeva Prasanna; G\u00e9za N\u00e9meth&lt;br&gt;&lt;br&gt;13.30-13.50 - A Maximum Likelihood Approach to Deep Neural Network Based Nonlinear Spectral Mapping for Single-Channel Speech Separation&lt;br&gt;&lt;small&gt;Yannan Wang; Jun Du; Lirong Dai; Chin-Hui Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Deep 
clustering-based beamforming for separation with unknown number of sources&lt;br&gt;&lt;small&gt;Takuya Higuchi; Keisuke Kinoshita; Marc Delcroix; Katerina Zmolikova; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Time-frequency masking for blind source separation with preserved spatial cues&lt;br&gt;&lt;small&gt;Shadi Pirhosseinloo; Kostas Kokkinakis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Variational Recurrent Neural Networks for Speech Separation&lt;br&gt;&lt;small&gt;Jen-Tzung Chien; Kuan-Ting Kuo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Detecting overlapped speech on short timeframes using deep learning&lt;br&gt;&lt;small&gt;Valentin Andrei; Horia Cucu; Corneliu Burileanu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Ideal ratio mask estimation using deep neural networks for monaural speech segregation in noisy reverberant conditions&lt;br&gt;&lt;small&gt;Xu Li; Junfeng Li; Yonghong Yan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Speech and Audio Signals\"\n                data-category-ids=\"1062\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Source Separation and Auditory Scene Analysis<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4310\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n     
           data-id=\"4310\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Acoustic Models for ASR 1\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Poster 1\"\n                data-room-id=\"1061\"\n                data-room-name=\"Poster 1\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Michiel Bacchiani&lt;br&gt;&lt;br&gt;13.30-15.30 - A Comparative Evaluation of GMM-Free State Tying Methods for ASR&lt;br&gt;&lt;small&gt;Tam\u00e1s Gr\u00f3sz; G\u00e1bor Gosztolya; L\u00e1szl\u00f3 T\u00f3th&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An exploration of dropout with LSTMs&lt;br&gt;&lt;small&gt;Gaofeng Cheng; Vijayaditya Peddinti; Dan Povey; Vimal Manohar; Sanjeev Khudanpur; Yonghong Yan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Convolutional Recurrent Neural Networks for Small-Footprint Keyword Spotting&lt;br&gt;&lt;small&gt;Sercan Arik; Markus Kliegl; Rewon Child; Joel Hestness; Andrew Gibiansky; Chris Fougner; Ryan Prenger; Adam Coates&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Deep Activation Mixture Model for Speech Recognition&lt;br&gt;&lt;small&gt;Chunyang Wu; Mark Gales&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Ensembles of Multi-scale VGG Acoustic Models&lt;br&gt;&lt;small&gt;Michael Heck; Masayuki Suzuki; Takashi Fukuda; Gakuto Kurata; Satoshi Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Forward-backward Convolutional LSTM for Acoustic Modeling&lt;br&gt;&lt;small&gt;Shigeki Karita; Atsunori Ogawa; Marc Delcroix; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Residual LSTM: Design of a Deep Recurrent Architecture for Distant Speech Recognition&lt;br&gt;&lt;small&gt;Jaeyoung Kim; Mostafa El-Khamy; Jungwon Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - 
Training Context-Dependent DNN Acoustic Models using Probabilistic Sampling&lt;br&gt;&lt;small&gt;Tam\u00e1s Gr\u00f3sz; G\u00e1bor Gosztolya; L\u00e1szl\u00f3 T\u00f3th&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - UNFOLDED DEEP RECURRENT CONVOLUTIONAL NEURAL NETWORK WITH JUMP AHEAD CONNECTIONS FOR ACOUSTIC MODELING&lt;br&gt;&lt;small&gt;Tien Dung Tran; Marc Delcroix; Shigeki Karita; Michael Hentschel; Atsunori Ogawa; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Acoustic Models for ASR 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 1<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4319\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4319\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 3\"\n                data-time=\"13:30-15:30\"\n                data-room=\"E306\"\n                data-room-id=\"1070\"\n                data-room-name=\"E306\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                
data-speakercell=\"\"\n                data-info=\"13.30-15.30 - Applications of the BBN Sage Speech Processing Platform&lt;br&gt;&lt;small&gt;Ralf Meermeier; Sean Colbath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Signal Processing Approach for Speaker Separation using SFF Analysis&lt;br&gt;&lt;small&gt;Nivedita Chennupati; Narayana Murthy BHVS; Bayya Yegnanarayana&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Bob Speaks Kaldi&lt;br&gt;&lt;small&gt;Milos Cernak; Alain Komaty; Amir Mohammadi; Andre Anjos; Sebastien Marcel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - MetaLab: A repository for meta-analyses on language development, and more&lt;br&gt;&lt;small&gt;Sho Tsuji; Christina Bergmann; Molly Lewis; Mika Braginsky; Page Piccinini; Michael C. Frank; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Real time pitch shifting with formant structure preservation using the phase vocoder&lt;br&gt;&lt;small&gt;Micha\u0142 Lenarczyk&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Speech Recognition and Understanding on Hardware-Accelerated DSP&lt;br&gt;&lt;small&gt;Georg Stemmer; Munir Georges; Joachim Hofer; Piotr Rozen; Josef Bauer; Jakub Nowicki; Tobias Bocklet; Hannah Colett; Ohad Falik; Michael Deisher; Sylvia Downing&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 3<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E306<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              
<\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4312\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4312\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Dialog Modeling\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Poster 3\"\n                data-room-id=\"1069\"\n                data-room-name=\"Poster 3\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kristiina Jokinen&lt;br&gt;&lt;br&gt;13.30-15.30 - Analysis of the Relationship between Prosodic Features of Fillers and Its Forms or Occurrence Positions&lt;br&gt;&lt;small&gt;Shizuka Nakamura; Ryosuke Nakanishi; Katsuya Takanashi; Tatsuya Kawahara&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Turn-taking Estimation Model based on Joint Embedding of Lexical and Prosodic Contents&lt;br&gt;&lt;small&gt;Chaoran Liu; Carlos Ishi; Hiroshi Ishiguro&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Cross-Subject Continuous Emotion Recognition using Speech and Body Motion in Dyadic Interactions&lt;br&gt;&lt;small&gt;Syeda Narjis Fatima; Engin Erzin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Domain-independent User Satisfaction Reward Estimation for Dialogue Policy Learning&lt;br&gt;&lt;small&gt;Stefan Ultes; Pawe\u0142 Budzianowski; I\u00f1igo Casanueva; Nikola Mrk\u0161i\u0107; Lina M. 
Rojas Barahona; Pei-Hao Su; Tsung-Hsien Wen; Milica Gasic; Steve Young&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - End-of-Utterance Prediction by Prosodic Features and Phrase-Dependency Structure in Spontaneous Japanese Speech&lt;br&gt;&lt;small&gt;Yuichi Ishimoto; Takehiro Teraoka; Mika Enomoto&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Entrainment in Multi-Party Spoken Dialogues at Multiple Linguistic Levels&lt;br&gt;&lt;small&gt;Zahra Rahimi; Anish Kumar; Diane Litman; Susannah Paletz; Mingzhi Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Hierarchical LSTMs with Joint Learning for Estimating Customer Satisfaction from Contact Center Calls&lt;br&gt;&lt;small&gt;Atsushi Ando; Ryo Masumura; Hosana Kamiyama; Satoshi Kobashikawa; Yushi Aono&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Improving prediction of speech activity using multi-participant respiratory state&lt;br&gt;&lt;small&gt;Marcin Wlodarczak; Kornel Laskowski; Mattias Heldner; K\u00e4tlin Aare&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Issues in Human and Automated Scoring of Fluency, Pronunciation and Intonation During Human--Machine Spoken Dialog Interactions&lt;br&gt;&lt;small&gt;Vikram Ramanarayanan; Patrick Lange; Keelan Evanini; Hillary Molloy; David Suendermann-Oeft&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Measuring Synchrony in Task-based Dialogues&lt;br&gt;&lt;small&gt;Justine Reverdy; Carl Vogel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Online End-of-Turn Detection from Speech based on Stacked Time-Asynchronous Sequential Networks&lt;br&gt;&lt;small&gt;Ryo Masumura; Taichi Asami; Hirokazu Masataki; Ryo Ishii; Ryuichiro Higashinaka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Sequence to Sequence Modeling for User Simulation in Dialog Systems&lt;br&gt;&lt;small&gt;Paul Crook; Alex Marin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Social Signal Detection in Spontaneous Dialogue Using Bidirectional LSTM-CTC&lt;br&gt;&lt;small&gt;Hirofumi Inaguma; Koji 
Inoue; Masato Mimura; Tatsuya Kawahara&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Towards Deep End-of-Turn Prediction for Situated Spoken Dialogue Systems&lt;br&gt;&lt;small&gt;Angelika Maier; Julian Hough; David Schlangen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Turn-Taking Offsets and Dialogue Context&lt;br&gt;&lt;small&gt;Peter Heeman; Rebecca Lunsford&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n                data-category-ids=\"1051\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Dialog Modeling<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4297\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4297\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Voice Conversion 1\"\n                data-time=\"13:30-15:30\"\n                data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hema Murthy; S R M 
Prasanna&lt;br&gt;&lt;br&gt;13.30-13.50 - Voice Conversion Using Sequence-to-Sequence Learning of Context Posterior Probabilities&lt;br&gt;&lt;small&gt;Hiroyuki Miyoshi; Yuki Saito; Shinnosuke Takamichi; Hiroshi Saruwatari&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Learning Latent Representations for Speech Generation and Transformation&lt;br&gt;&lt;small&gt;Wei-Ning Hsu; Yu Zhang; James Glass&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Parallel-data-free Many-to-many Voice Conversion based on DNN Integrated with Eigenspace Using a Non-parallel Speech Corpus&lt;br&gt;&lt;small&gt;Tetsuya Hashimoto; Hidetsugu Uchida; Daisuke Saito; Nobuaki Minematsu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Sequence-to-Sequence Voice Conversion with Similarity Metric Learned Using Generative Adversarial Networks&lt;br&gt;&lt;small&gt;Takuhiro Kaneko; Hirokazu Kameoka; Kaoru Hiramatsu; Kunio Kashino&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - A mouth opening effect based on pole modification for expressive singing voice transformation&lt;br&gt;&lt;small&gt;Luc Ardaillon; Axel Roebel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Siamese Autoencoders for Speech Style Extraction and Switching Applied to Voice Identification and Conversion&lt;br&gt;&lt;small&gt;Seyed Hamidreza Mohammadi; Alexander Kain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Synthesis and Spoken Language Generation\"\n                data-category-ids=\"1059\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Voice Conversion 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i 
class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4320\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4320\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 4\"\n                data-time=\"13:30-15:30\"\n                data-room=\"E397\"\n                data-room-id=\"1071\"\n                data-room-name=\"E397\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"13.30-15.30 - Combining Gaussian mixture models and segmental feature models for speaker recognition&lt;br&gt;&lt;small&gt;Milana Milo\u0161evi\u0107; Ulrike Glavitsch&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Did you laugh enough today? 
- Deep Neural Networks for Mobile and Wearable Laughter Trackers&lt;br&gt;&lt;small&gt;Gerhard Hagerer; Nicholas Cummins; Florian Eyben; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Evolving recurrent neural networks that process and classify raw audio in a streaming fashion&lt;br&gt;&lt;small&gt;Adrien DANIEL&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Low-Frequency Ultrasonic Communication for Speech Broadcasting in Public Transportation&lt;br&gt;&lt;small&gt;Kwang Myung Jeon; Nam Kyun Kim; Chan Woong Kwak; Jung Min Moon; Hong Kook Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Reading validation for pronunciation evaluation in the Digitala project&lt;br&gt;&lt;small&gt;Aku Rouhe; Reima Karhila; Peter Smit; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Real-time Speech Enhancement with GCC-NMF: Demonstration on the Raspberry Pi and NVIDIA Jetson&lt;br&gt;&lt;small&gt;Sean Wood; Jean Rouat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 4<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E397<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4322\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4322\"\n                
data-project=\"project_242_2017_01_12\"\n                data-title=\"Speaker Recognition for the Next Decade\"\n                data-time=\"13:30-15:30\"\n                data-room=\"B3\"\n                data-room-id=\"1072\"\n                data-room-name=\"B3\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4322.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Special event\"\n                data-category-ids=\"1064\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speaker Recognition for the Next Decade<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4296\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4296\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"WaveNet and Novel Paradigms\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                
data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Peter Cahill; Rob Clark&lt;br&gt;&lt;br&gt;13.30-13.50 - Speaker-dependent WaveNet vocoder&lt;br&gt;&lt;small&gt;Akira Tamamori; Tomoki Hayashi; Kazuhiro Kobayashi; Kazuya Takeda; Tomoki Toda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Waveform Modeling Using Stacked Dilated Convolutional Neural Networks for Speech Bandwidth Extension&lt;br&gt;&lt;small&gt;Yu Gu; Zhen-Hua Ling&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Direct modeling of frequency spectra and waveform generation based on phase recovery for DNN-based speech synthesis&lt;br&gt;&lt;small&gt;Shinji Takaki; Hirokazu Kameoka; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - A Hierarchical Encoder-Decoder Model for Statistical parametric speech synthesis&lt;br&gt;&lt;small&gt;Srikanth Ronanki; Oliver Watts; Simon King&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Statistical voice conversion with WaveNet-based waveform generation&lt;br&gt;&lt;small&gt;Kazuhiro Kobayashi; Tomoki Hayashi; Akira Tamamori; Tomoki Toda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Google\u2019s Next-Generation Real-Time Unit-Selection Synthesizer using Sequence-To-Sequence LSTM-based Autoencoders&lt;br&gt;&lt;small&gt;Vincent Wan; Yannis Agiomyrgiannakis; Hanna Silen; Jakub Vit&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Synthesis and Spoken Language Generation\"\n                data-category-ids=\"1059\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>WaveNet and Novel Paradigms<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i 
class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4325\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4325\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Incremental Processing and Responsive Behaviour\"\n                data-time=\"13:30-15:30\"\n                data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Timo Baumann; Ingmar Steiner&lt;br&gt;&lt;br&gt;13.30-13.45 - Introduction&lt;br&gt;&lt;small&gt;Timo Baumann; Thomas Hueber; David Schlangen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.45-14.00 - Adjusting the Frame: Biphasic Performative Control of Speech Rhythm&lt;br&gt;&lt;small&gt;Samuel Delalez; Christophe d'Alessandro&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.00-14.15 - Attentional factors in listeners' uptake of gesture cues during speech processing&lt;br&gt;&lt;small&gt;Raheleh Saryazdi; Craig Chambers&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.15-14.30 - Motion analysis in vocalized surprise expressions&lt;br&gt;&lt;small&gt;Carlos Ishi; Takashi Minato; Hiroshi Ishiguro&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.45 - Enhancing Backchannel Prediction Using Word Embeddings&lt;br&gt;&lt;small&gt;Robin R\u00fcde; Markus M\u00fcller; Sebastian St\u00fcker; Alex Waibel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.45-15.00 - A Computational Model for Phonetically Responsive Spoken Dialogue Systems&lt;br&gt;&lt;small&gt;Eran Raveh; Ingmar Steiner; Bernd 
M\u00f6bius&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.00-15.15 - Incremental Dialogue Act Recognition: token- vs chunk-based classification&lt;br&gt;&lt;small&gt;Eustace Ebhotemhen; Volha Petukhova; Dietrich Klakow&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.15-15.30 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n                data-category-ids=\"1051\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Incremental Processing and Responsive Behaviour<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4301\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4301\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Emotion Modeling\"\n                data-time=\"13:30-15:30\"\n                data-room=\"D8\"\n                data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Koichi Shinoda; Anton  Batliner&lt;br&gt;&lt;br&gt;13.30-13.50 - Speech Emotion Recognition 
with Emotion-Pair based Framework Considering Emotion Distribution Information in Dimensional Emotion Space&lt;br&gt;&lt;small&gt;Xi Ma; Zhiyong Wu; Jia Jia; Mingxing Xu; Helen Meng; Lianhong Cai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Adversarial Auto-encoders for Speech Based Emotion Recognition&lt;br&gt;&lt;small&gt;Saurabh Sahu; Rahul Gupta; Ganesh Sivaraman; Wael Abdalmageed; Carol Espy-Wilson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - An Investigation of Emotion Prediction Uncertainty Using Gaussian Mixture Regression&lt;br&gt;&lt;small&gt;Ting Dang; Vidhyasaharan Sethu; Julien Epps; Eliathamby Ambikairajah&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Capturing Long-term Temporal Dependencies with Convolutional Networks for Continuous Emotion Recognition&lt;br&gt;&lt;small&gt;Soheil Khorram; Zakaria Aldeneh; Dimitrios Dimitriadis; Melvin McInnis; Emily Mower Provost&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Voice-to-affect mapping: inferences on language voice baseline settings&lt;br&gt;&lt;small&gt;Ailbhe N\u00ed Chasaide; Irena Yanushevskaya; Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Attentive Convolutional Neural Network based Speech Emotion Recognition: A Study on the Impact of Input Features, Signal Length, and Acted Speech&lt;br&gt;&lt;small&gt;Michael Neumann; Ngoc Thang Vu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Emotion Modeling<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n               <div 
class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4300\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4300\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Prosody: Tone and Intonation\"\n                data-time=\"13:30-15:30\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mariapaola D'Imperio; Oliver Niebuhr&lt;br&gt;&lt;br&gt;13.30-13.50 - The Vocative Chant and Beyond: German Calling Melodies under Routine and Urgent Contexts&lt;br&gt;&lt;small&gt;Sergio Quiroz; Marzena Zygis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Comparing languages using hierarchical prosodic analysis&lt;br&gt;&lt;small&gt;Juraj \u0160imko; Antti Suni; Katri Hiovain; Martti Vainio&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Intonation Facilitates Prediction of Focus even in the Presence of Lexical Tones&lt;br&gt;&lt;small&gt;Martin Ho Kwan Ip; Anne Cutler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Mind the peak: When museum is temporarily understood as musical in Australian English&lt;br&gt;&lt;small&gt;Katharina Zahner; Heather Kember; Bettina Braun&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Pashto intonation patterns&lt;br&gt;&lt;small&gt;Luca Rognoni; Judith Bishop; Miriam Corris&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - A new model of final lowering in spontaneous 
monologue&lt;br&gt;&lt;small&gt;Kikuo Maekawa&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Prosody: Tone and Intonation<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4311\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4311\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Acoustic Models for ASR 2\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Poster 2\"\n                data-room-id=\"1067\"\n                data-room-name=\"Poster 2\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Karen Livescu&lt;br&gt;&lt;br&gt;13.30-15.30 - Acoustic feature learning with deep variational canonical correlation analysis&lt;br&gt;&lt;small&gt;Qingming Tang; Weiran Wang; Karen Livescu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An Efficient Phone N-gram Forward-backward Computation Using Dense Matrix Multiplication&lt;br&gt;&lt;small&gt;Khe Chai Sim; Arun 
Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Backstitch: Counteracting Finite-sample Bias via Negative Steps&lt;br&gt;&lt;small&gt;Yiming Wang; Vijayaditya Peddinti; Hainan Xu; Xiaohui Zhang; Dan Povey; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - End-to-End Training of Acoustic Models for Large Vocabulary Continuous Speech Recognition with TensorFlow&lt;br&gt;&lt;small&gt;Ehsan Variani; Tom Bagby; Erik McDermott; Michiel Bacchiani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Node pruning based on Entropy of Weights and Node Activity for Small-footprint Acoustic Model based on Deep Neural Networks&lt;br&gt;&lt;small&gt;Ryu Takeda; Kazuhiro Nakadai; Kazunori Komatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Parallel Neural Network Features for Improved Tandem Acoustic Modeling&lt;br&gt;&lt;small&gt;Zolt\u00e1n T\u00fcske; Wilfried Michel; Ralf Schl\u00fcter; Hermann Ney&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Acoustic Models for ASR 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4298\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div 
class=\"box\"\n                data-id=\"4298\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Models of Speech Perception\"\n                data-time=\"13:30-15:30\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Chris Davis; Frank Zimmerer&lt;br&gt;&lt;br&gt;13.30-13.50 - A Comparison of Sentence-level Speech Intelligibility Metrics&lt;br&gt;&lt;small&gt;Alexander Kain; Max Del Giudice; Kris Tjaden&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - An auditory model of speaker size perception for voiced speech sounds&lt;br&gt;&lt;small&gt;Toshio Irino; Eri Takimoto; Toshie Matsui; Roy Patterson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - The recognition of compounds: a computational account&lt;br&gt;&lt;small&gt;Louis ten Bosch; Lou Boves; Mirjam Ernestus&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Humans do not maximize the probability of correct decision when recognizing DANTALE words in noise&lt;br&gt;&lt;small&gt;Mohsen Zareian Jahromi; Jan \u00d8stergaard; Jesper Jensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Single-ended prediction of listening effort based on automatic speech recognition&lt;br&gt;&lt;small&gt;Rainer Huber; Constantin Spille; Bernd T. 
Meyer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Modeling categorical perception with the receptive fields of auditory neurons&lt;br&gt;&lt;small&gt;Chris Neufeld&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Models of Speech Perception<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5258\">\n\n              <div class='time-header'>15:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"5258\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Refreshments\"\n                data-time=\"15:30-16:00\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div 
class=\"box_inner\">\n                                <span>Refreshments<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"15:30\">15:30-16:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4314\">\n\n              <div class='time-header'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4314\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Voice, Speech and Hearing Disorders\"\n                data-time=\"16:00-18:00\"\n                data-room=\"Poster 2\"\n                data-room-id=\"1067\"\n                data-room-name=\"Poster 2\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Timothy Bunnell&lt;br&gt;&lt;br&gt;16.00-18.00 - Apkinson \u2013 A mobile monitoring solution for Parkinson\u2019s disease&lt;br&gt;&lt;small&gt;Philipp Klumpp; Thomas Janu; Tom\u00e1s Arias-Vergara; Juan Camilo V\u00e1squez Correa; Juan Rafael Orozco-Arroyave; Elmar Noeth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Automatic Prediction of Speech Evaluation Metrics for Dysarthric Speech&lt;br&gt;&lt;small&gt;Imed Laaridh; Waad Ben Kheder; Corinne Fredouille; Christine Meunier&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Cepstral and entropy analyses in vowels excerpted from continuous speech of dysphonic and control speakers&lt;br&gt;&lt;small&gt;Antonella Castellana; Andreas Selamtzis; Giampiero Salvi; Alessio Carullo; Arianna 
Astolfi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Classification of bulbar ALS from kinematic features of the jaw and lips: Towards computer-mediated assessment&lt;br&gt;&lt;small&gt;Andrea Bandini; Jordan Green; Lorne Zinman; Yana Yunusova&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Deep Autoencoder based Speech Features for Improved Dysarthric Speech Recognition&lt;br&gt;&lt;small&gt;Bhavik Vachhani; Chitralekha Bhat; Biswajit Das; Sunil Kumar Kopparapu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Dysprosody differentiate between Parkinson\u2019s disease, progressive supranuclear palsy, and multiple system atrophy&lt;br&gt;&lt;small&gt;Jan Hlavni\u010dka; Tereza Tykalov\u00e1; Roman \u010cmejla; Ji\u0159\u00ed Klemp\u00ed\u0159; Ev\u017een R\u016f\u017ei\u010dka; Jan Rusz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Hypernasality Severity Analysis in Cleft Lip and Palate Speech Using Vowel Space Area.&lt;br&gt;&lt;small&gt;Nikitha K; Sishir Kalita; CM Vikram; M. Pushpavathi; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Interpretable Objective Assessment of Dysarthric Speech based on Deep Neural Networks&lt;br&gt;&lt;small&gt;Ming Tu; Visar Berisha; Julie Liss&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Prediction of Speech Delay from Acoustic Measurements&lt;br&gt;&lt;small&gt;Jason Lilley; Madhavi Ratnagiri; H Timothy Bunnell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Production of sustained vowels and categorical perception of tones in Mandarin among cochlear-implanted children&lt;br&gt;&lt;small&gt;Wentao Gu; Jiao Yin; James Mahshie&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The Frequency Range of \u201cThe Ling Six Sounds\u201d in Standard Chinese&lt;br&gt;&lt;small&gt;Aijun Li; Hua Zhang; Wen Sun&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Zero Frequency Filter Based Analysis of Voice Disorders&lt;br&gt;&lt;small&gt;Nagaraj Adiga; Vikram C M; Keerthi Pullela; S R Mahadeva 
Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Voice, Speech and Hearing Disorders<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4302\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4302\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Neural Network Acoustic Models for ASR 2\"\n                data-time=\"16:00-18:00\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mark Gales; Tara Sainath&lt;br&gt;&lt;br&gt;16.00-16.20 - Recurrent Neural Aligner: An Encoder-Decoder Neural Network Model for Sequence to Sequence Mapping&lt;br&gt;&lt;small&gt;Hasim Sak; Matt Shannon; Kanishka Rao; Francoise Beaufays&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Highway-LSTM and Recurrent Highway Networks for Speech 
Recognition&lt;br&gt;&lt;small&gt;Golan Pundak; Tara Sainath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Improving speech recognition by revising gated recurrent units&lt;br&gt;&lt;small&gt;Mirco Ravanelli; Philemon Brakel; Maurizio Omologo; Yoshua Bengio&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Stochastic Recurrent Neural Network for Speech Recognition&lt;br&gt;&lt;small&gt;Jen-Tzung Chien; Chen Shen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Frame and Segment Level Recurrent Neural Networks for Phone Classification&lt;br&gt;&lt;small&gt;Martin Ratajczak; Sebastian Tschiatschek; Franz Pernkopf&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Deep Learning-based Telephony Speech Recognition in the Wild&lt;br&gt;&lt;small&gt;Kyu Han; Seongjun Hahm; Byung-Hak Kim; Jungsuk Kim; Ian Lane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Neural Network Acoustic Models for ASR 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4313\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4313\"\n                
data-project=\"project_242_2017_01_12\"\n                data-title=\"L1 and L2 Acquisition\"\n                data-time=\"16:00-18:00\"\n                data-room=\"Poster 1\"\n                data-room-id=\"1061\"\n                data-room-name=\"Poster 1\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Aoju Chen&lt;br&gt;&lt;br&gt;16.00-18.00 - A comparison of Danish listeners\u2019 processing cost in judging the truth value of Norwegian, Swedish, and English sentences&lt;br&gt;&lt;small&gt;Ocke-Schwen Bohn; Trine Askj\u00e6r-J\u00f8rgensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - A data-driven approach for perceptually validated acoustic features for children's sibilant fricative productions&lt;br&gt;&lt;small&gt;Patrick Reidy; Mary Beckman; Jan Edwards; Benjamin Munson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - An Automatically Aligned Corpus of Child-directed Speech&lt;br&gt;&lt;small&gt;Micha Elsner; Kiwako Ito&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Articulation rate in Swedish child-directed speech increases as a function of the age of the child even when surprisal is controlled for&lt;br&gt;&lt;small&gt;Johan Sjons; Thomas H\u00f6rberg; Robert \u00d6stling; Johannes Bjerva&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Changes in early L2 cue-weighting of non-native speech: Evidence from learners of Mandarin Chinese&lt;br&gt;&lt;small&gt;Seth Wiener&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Directing Attention during Perceptual Training: A Preliminary Study of Phonetic Learning in Southern Min by Mandarin Speakers&lt;br&gt;&lt;small&gt;Ying Chen; Eric Pederson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Lexical adaptation to a novel accent in German: A comparison between German, Swedish, and Finnish listeners&lt;br&gt;&lt;small&gt;Adriana 
Hanulikova; Jenny Ekstr\u00f6m&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Measuring Encoding Efficiency in Swedish and English Language Learner Speech Production&lt;br&gt;&lt;small&gt;Gintare Grigonyte; Gerold Schneider&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Mechanisms of Tone Sandhi Rule Application by Non-native Speakers&lt;br&gt;&lt;small&gt;Si Chen; YUNJUAN HE; Chun Wah Yuen; Bei Li; Yike Yang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - MMN responses in adults after exposure to bimodal and unimodal frequency distributions of rotated speech&lt;br&gt;&lt;small&gt;Ellen Marklund; El\u00edsabet Eir Cortes; Johan Sjons&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - On the role of temporal variability in the acquisition of the German vowel length contrast&lt;br&gt;&lt;small&gt;Felicitas Kleber&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Prosody analysis of L2 English for naturalness evaluation through speech modification&lt;br&gt;&lt;small&gt;Dean Luo; Ruxin Luo; Lixin Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Qualitative differences in L3 learners' neurophysiological response to L1 versus L2 transfer&lt;br&gt;&lt;small&gt;Alejandra Keidel Fern\u00e1ndez; Thomas H\u00f6rberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Quality Assessment of ESL Learner\u2019s Sentence Prosody with TTS Synthesized Voice as Reference&lt;br&gt;&lt;small&gt;Yujia Xiao; Frank Soong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The relationship between the perception and production of non-native tones&lt;br&gt;&lt;small&gt;Kaile Zhang; Gang Peng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                
<span>L1 and L2 Acquisition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 1<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4315\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4315\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Source Separation and Voice Activity Detection\"\n                data-time=\"16:00-18:00\"\n                data-room=\"Poster 3\"\n                data-room-id=\"1069\"\n                data-room-name=\"Poster 3\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tom B\u00e4ckstr\u00f6m&lt;br&gt;&lt;br&gt;16.00-18.00 - A Contrast Function and Algorithm for Blind Separation of Audio Signals&lt;br&gt;&lt;small&gt;Wei Gao; Roberto Togneri; Victor Sreeram&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - A Mask Estimation Method Integrating Data Field Model for Speech Enhancement&lt;br&gt;&lt;small&gt;Xianyun Wang; Changchun Bao; Feng Bao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Audio Content based Geotagging in Multimedia&lt;br&gt;&lt;small&gt;Anurag Kumar; Benjamin Elizalde; Bhiksha Raj&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Bimodal Recurrent Neural Network for Audiovisual Voice Activity Detection&lt;br&gt;&lt;small&gt;Fei Tao; Carlos Busso&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Domain-Specific Utterance End-Point Detection 
for Speech Recognition&lt;br&gt;&lt;small&gt;Roland Maas; Ariya Rastrow; Kyle Goehner; Gautam Tiwari; Shaun Joseph; Bjorn Hoffmeister&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Excitation Source Features for Improving the Detection of Vowel Onset and Offset Points in a Speech Sequence&lt;br&gt;&lt;small&gt;Gayadhar Pradhan; Avinash Kumar; Syed Shahnawazuddin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Improved end-of-query detection for streaming speech recognition&lt;br&gt;&lt;small&gt;Matt Shannon; Gabor Simko; Shuo-Yiin Chang; Carolina Parada&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Improving Source Separation via Multi-Speaker Representations&lt;br&gt;&lt;small&gt;Jeroen Zegers; Hugo Van hamme&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Multiple Sound Source Counting and Localization Based on Spatial Principal Eigenvector&lt;br&gt;&lt;small&gt;Bing Yang; Hong Liu; Cheng Pang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speaker Direction-of-Arrival Estimation Based On Frequency-Independent Beampattern&lt;br&gt;&lt;small&gt;Feng Guo; Yuhang Cao; Zheng Liu; Jiaen Liang; Baoqing Li; Xiaobing Yuan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speech detection and enhancement using single microphone for distant speech applications in reverberant environments&lt;br&gt;&lt;small&gt;Vinay Kothapally; John H.L. 
Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Subband selection for binaural speech source localization&lt;br&gt;&lt;small&gt;Karthik Girija Ramesan; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Time Delay Histogram Based Speech Source Separation Using a Planar Array&lt;br&gt;&lt;small&gt;Zhaoqiong Huang; Zhanzhong Cao; Dongwen Ying; Jielin Pan; Yonghong Yan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Unmixing Convolutive Mixtures by Exploiting Amplitude Co-modulation: Methods and Evaluation on Mandarin Speech Recordings&lt;br&gt;&lt;small&gt;Bo-Rui Chen; Huang-Yi Lee; Yi-Wen Liu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Using Approximated Auditory Roughness as a Pre-filtering Feature for Human Screaming and Affective Speech AED&lt;br&gt;&lt;small&gt;Di He; Zuofu Cheng; Mark Hasegawa-Johnson; Deming Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Weighted Spatial Covariance Matrix Estimation for MUSIC based TDOA Estimation of Speech Source&lt;br&gt;&lt;small&gt;Chenglin Xu; Xiong Xiao; Sining Sun; Wei Rao; Eng Siong Chng; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Speech and Audio Signals\"\n                data-category-ids=\"1062\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Source Separation and Voice Activity Detection<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div 
class=\"box-wrapper\"\n              data-id=\"4326\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4326\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Acoustic Manifestations of Social Characteristics\"\n                data-time=\"16:00-18:00\"\n                data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Stefanie Jannedy; Melanie Weirich&lt;br&gt;&lt;br&gt;16.00-16.05 - Introduction&lt;br&gt;&lt;small&gt;Stefanie Jannedy; Melanie Weirich&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.05-16.25 - Clear Speech - Mere Speech? How segmental and prosodic speech reduction shape the impression that speakers create on listeners&lt;br&gt;&lt;small&gt;Oliver Niebuhr&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.25-16.45 - To see or not to see: Interlocutor visibility and likeability influence convergence in intonation&lt;br&gt;&lt;small&gt;Katrin Schweitzer; Michael Walsh; Antje Schweitzer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.45-17.05 - Acoustic correlates of parental role and gender identity in the speech of expecting parents&lt;br&gt;&lt;small&gt;Melanie Weirich; Adrian Simpson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.05-17.25 - Effects of Talker Dialect, Gender &amp; Race on Accuracy of Bing Speech and YouTube Automatic Captions&lt;br&gt;&lt;small&gt;Rachael Tatman; Conner Kasten&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - A Semi-Supervised Learning Approach for Acoustic-Prosodic Personality Perception in Under-Resourced Domains&lt;br&gt;&lt;small&gt;Rub\u00e9n Solera-Ure\u00f1a; Helena Moniz; Fernando Batista; Vera Cabarrao; Anna Pompili; Ram\u00f3n 
Fern\u00e1ndez-Astudillo; Joana Campos; Ana Paiva; Isabel Trancoso&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - Perceptual and acoustic correlates of gender in the prepubertal voice&lt;br&gt;&lt;small&gt;Adrian Simpson; Riccarda Funk; Frederik Palmer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - Prosodic analysis of attention-drawing speech&lt;br&gt;&lt;small&gt;Carlos Ishi; Jun Arai; Norihiro Hagita&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - Relationships between speech timing and perceived hostility in a French corpus of political debates&lt;br&gt;&lt;small&gt;Charlotte Kouklia; Nicolas Audibert&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - Towards Speaker Characterization: Identifying and Predicting Dimensions of Person Attribution&lt;br&gt;&lt;small&gt;Laura Fern\u00e1ndez Gallardo; Benjamin Weiss&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Acoustic Manifestations of Social Characteristics<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4316\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4316\"\n                data-project=\"project_242_2017_01_12\"\n           
     data-title=\"Speech-enhancement\"\n                data-time=\"16:00-18:00\"\n                data-room=\"Poster 4\"\n                data-room-id=\"1068\"\n                data-room-name=\"Poster 4\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Timo Gerkmann&lt;br&gt;&lt;br&gt;16.00-18.00 - A comparison of perceptually motivated loss functions for binary mask estimation in speech separation&lt;br&gt;&lt;small&gt;Danny Websdale; Ben Milner; Danny Websdale; Ben Milner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - A Fully Convolutional Network for Speech Enhancement&lt;br&gt;&lt;small&gt;Serim Park; Jinwon Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - A Post-filtering Approach Based on Locally Linear Embedding Difference Compensation for Speech Enhancement&lt;br&gt;&lt;small&gt;YICHIAO WU; Hsin-Te Hwang; Syu-Siang Wang; Chin-Cheng Hsu; Yu Tsao; Hsin-Min Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Binary mask estimation strategies for constrained imputation-based speech enhancement&lt;br&gt;&lt;small&gt;Ricard Marxer; Jon Barker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - BINAURAL REVERBERANT SPEECH SEPARATION BASED ON DEEP NEURAL NETWORKS&lt;br&gt;&lt;small&gt;Xueliang Zhang; DeLiang Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Conditional Generative Adversarial Networks for Speech Enhancement and Noise-Robust Speaker Verification&lt;br&gt;&lt;small&gt;Daniel Michelsanti; Zheng-Hua Tan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Improved Example-based Speech Enhancement by Using Deep Neural Network Acoustic Model for Noise Robust Example Search&lt;br&gt;&lt;small&gt;Atsunori Ogawa; Keisuke Kinoshita; Marc Delcroix; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - MixMax Approximation as a Super-Gaussian Log-Spectral Amplitude 
Estimator for Speech Enhancement&lt;br&gt;&lt;small&gt;Robert Rehr; Timo Gerkmann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Multi-target Ensemble Learning for Monaural Speech Separation&lt;br&gt;&lt;small&gt;Hui Zhang; Xueliang Zhang; Guanglai Gao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - On the influence of modifying magnitude and phase spectrum to enhance noisy speech signals&lt;br&gt;&lt;small&gt;Hans-Guenter Hirsch; Michael Gref&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - On the quality and intelligibility of noisy speech processed for near-end listening enhancement&lt;br&gt;&lt;small&gt;Catalin Zorila; Yannis Stylianou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - REAL-TIME MODULATION ENHANCEMENT OF TEMPORAL ENVELOPES FOR INCREASING SPEECH INTELLIGIBILITY&lt;br&gt;&lt;small&gt;Maria Koutsogiannaki; Holly Francois; Kihyun Choo; Eunmi Oh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speech Enhancement Using Bayesian Wavenet&lt;br&gt;&lt;small&gt;Kaizhi Qian; Yang Zhang; Shiyu Chang; Xuesong Yang; Dinei Florencio; Mark Hasegawa-Johnson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speech enhancement using non-negative spectrogram models with mel-generalized cepstral regularization&lt;br&gt;&lt;small&gt;Li Li; Hirokazu Kameoka; Tomoki Toda; Shoji Makino&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Subjective intelligibility of deep neural network-based speech enhancement&lt;br&gt;&lt;small&gt;Femke B. Gelderblom; Tron V. Tronstad; Erlend M. 
Viggen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Coding and Enhancement\"\n                data-category-ids=\"1060\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech-enhancement<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4306\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4306\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Prosody: Rhythm, Stress, Quantity and Phrasing\"\n                data-time=\"16:00-18:00\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Plinio Barbosa; P\u00e4rtel Lippus&lt;br&gt;&lt;br&gt;16.00-16.20 - Similar prosodic structure perceived differently in German and English&lt;br&gt;&lt;small&gt;Heather Kember; Ann-Kathrin Grohe; Katharina Zahner; Bettina Braun; Andrea Weber; Anne Cutler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Disambiguate or not? 
\u2013 The role of prosody in unambiguous and potentially ambiguous anaphora production in strictly Mandarin parallel structures&lt;br&gt;&lt;small&gt;Luying Hou; Bert Le Bruyn; Ren\u00e9 Kager&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Acoustic Properties of Canonical and Non-Canonical Stress in French, Turkish, Armenian and Brazilian Portuguese&lt;br&gt;&lt;small&gt;Angeliki Athanasopoulou; Irene Vogel; Hossep Dolatian&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Phonological complexity, segment rate and speech tempo perception&lt;br&gt;&lt;small&gt;Leendert Plug; Rachel Smith&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - On the Duration of Mandarin Tones&lt;br&gt;&lt;small&gt;Jing Yang; Yu Zhang; Aijun Li; Li Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - The formant dynamics of long close vowels in three varieties of Swedish&lt;br&gt;&lt;small&gt;Otto Ewald; Eva Liina Asu; Susanne Sch\u00f6tz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Prosody: Rhythm, Stress, Quantity and Phrasing<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4304\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                
data-id=\"4304\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speaker Recognition Evaluation\"\n                data-time=\"16:00-18:00\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kong Aik Lee; Rahim Saeidi&lt;br&gt;&lt;br&gt;16.00-16.20 - The I4U Mega Fusion and Collaboration for NIST Speaker Recognition Evaluation 2016&lt;br&gt;&lt;small&gt;Kong Aik Lee; Ville Hautamaki; Tomi Kinnunen; Anthony Larcher; Chunlei Zhang; Andreas Nautsch; Themos Stafylakis; Gang Liu; Mickael Rouvier; Wei Rao; Federico Alegre; Jianbo Ma; Manwai Mak; Achintya Sarkar; H\u00e9ctor Delgado; Rahim Saeidi; Hagai Aronowitz; Aleksandr Sizov; Hanwu Sun; Guangsen Wang; Trung Hieu Nguyen; Bin Ma; Ville Vestman; Md Sahidullah; Miikka Halonen; Anssi Kanervisto; Gael Le Lan; Fahimeh Bahmaninezhad; Sergey Isadskiy; Christian Rathgeb; Christoph Busch; Georgios Tzimiropoulos; Qi Qian; Zhibin Wang; Qingen Zhao; Tianzhou Wang; Hao Li; Jian Xue; Shenghuo Zhu; Rong Jin; Tuo Zhao; Pierre-Michel Bousquet; Moez Ajili; Waad Ben Kheder; Driss Matrouf; Zhi Hao Lim; Chenglin Xu; Haihua Xu; Xiong Xiao; Eng Siong Chng; Benoit Fauve; Vidhyasaharan Sethu; Kaavya Sriskandaraja; W. W. Lin; Zheng-Hua Tan; Dennis Alexander Lehmann Thomsen; Massimiliano Todisco; Nicholas Evans; Haizhou Li; John H.L. 
Hansen; Jean-Francois Bonastre&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - The MIT-LL, JHU and LRDE NIST 2016 Speaker Recognition Evaluation System&lt;br&gt;&lt;small&gt;Pedro Torres-Carrasquillo; Fred Richardson; Shahan Nercessian; Douglas Sturim; William Campbell; Youngjune Gwon; Swaroop Vattam; Najim Dehak; Harish Mallidi; Phani Sankar Nidadavolu; Ruizhi Li; Reda Dehak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Nuance - Politecnico di Torino\u2019s 2016 NIST Speaker Recognition Evaluation System&lt;br&gt;&lt;small&gt;Daniele Colibro; Claudio Vair; Emanuele Dalmasso; Kevin Farrell; Gennady Karvitsky; Sandro Cumani; Pietro Laface&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - UTD-CRSS Systems for 2016 NIST Speaker Recognition Evaluation&lt;br&gt;&lt;small&gt;Chunlei Zhang; Fahimeh Bahmaninezhad; Shivesh Ranjan; Chengzhu Yu; Navid Shokouhi; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Analysis and Description of ABC Submission to NIST SRE 2016&lt;br&gt;&lt;small&gt;Oldrich Plchot; Pavel Matejka; Anna Silnova; Ond\u0159ej Novotn\u00fd; Mireia Diez; Johan Rohdin; Ondrej Glembek; Niko Brummer; Albert Swart; Jes\u00fas Jorr\u00edn; Leibny Paola Garcia Perera; Luis Buera; Patrick Kenny; Md Jahangir Alam; Gautam Bhattacharya&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - The 2016 NIST Speaker Recognition Evaluation&lt;br&gt;&lt;small&gt;Seyed Omid Sadjadi; Timothee Kheyrkhah; Audrey Tong; Craig Greenberg; Douglas Reynolds; Elliot Singer; Lisa Mason; Jaime Hernandez-Cordero&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speaker Recognition Evaluation<\/span>\n               <br>\n        
       \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4323\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4323\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Students Meet Experts\"\n                data-time=\"16:00-18:00\"\n                data-room=\"B3\"\n                data-room-id=\"1072\"\n                data-room-name=\"B3\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4323.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Special event\"\n                data-category-ids=\"1064\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Students Meet Experts<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4303\">\n\n              <div class='time-header' style='visibility: 
hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4303\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Stance, Credibility, and Deception\"\n                data-time=\"16:00-18:00\"\n                data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Julien Epps; Carlos Busso&lt;br&gt;&lt;br&gt;16.00-16.20 - Inferring Stance from Prosody&lt;br&gt;&lt;small&gt;Nigel Ward; Jason Carlson; Olac Fuentes; Diego Castan; Elizabeth Shriberg; Andreas Tsiartas&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Exploring Dynamic Measures of Stance in Spoken Interaction&lt;br&gt;&lt;small&gt;Gina-Anne Levow; Richard A. Wright&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Opinion Dynamics Modeling for Movie Review Transcripts Classification with Hidden Conditional Random Fields&lt;br&gt;&lt;small&gt;Valentin Barriere; Chlo\u00e9 Clavel; Slim Essid&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - TRANSFER LEARNING BETWEEN CONCEPTS FOR HUMAN BEHAVIOR MODELING: AN APPLICATION TO SINCERITY AND DECEPTION PREDICTION&lt;br&gt;&lt;small&gt;Qinyi Luo; Rahul Gupta; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - The Sound of Deception - What Makes a Speaker Credible?&lt;br&gt;&lt;small&gt;Anne Schr\u00f6der; Simon Stone; Peter Birkholz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Hybrid Acoustic-Lexical Deep Learning Approach for Deception Detection&lt;br&gt;&lt;small&gt;Gideon Mendels; Sarah Ita Levitan; Kai-Zhan Lee; Julia Hirschberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                
data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Stance, Credibility, and Deception<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4307\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4307\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Recognition for Langauge Learning\"\n                data-time=\"16:00-18:00\"\n                data-room=\"D8\"\n                data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tatsuya Kawahara; Martin Russell&lt;br&gt;&lt;br&gt;16.00-16.20 - Bidirectional LSTM-RNN for Improving Automated Assessment of Non-native Children\u2019s Speech&lt;br&gt;&lt;small&gt;Yao Qian; Keelan Evanini; Xinhao Wang; Chong Min Lee; Matthew Mulholland&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Automatic Scoring of Shadowing Speech based on DNN Posteriors and their DTW&lt;br&gt;&lt;small&gt;Junwei Yue; Fumiya Shiozawa; Shohei Toyama; Yutaka Yamauchi; Kayoko Ito; Daisuke Saito; Nobuaki Minematsu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Off-Topic Spoken 
Response Detection Using Siamese Convolutional Neural Networks&lt;br&gt;&lt;small&gt;Chong Min Lee; Su-Youn Yoon; Xinhao Wang; Matthew Mulholland; Ikkyu Choi; Keelan Evanini&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Phonological Feature Based Mispronunciation Detection and Diagnosis using Multi-Task DNNs and Active Learning&lt;br&gt;&lt;small&gt;Vipul Arora; Aditi Lahiri; Henning Reetz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Detection of Mispronunciations and Disfluencies in Children Reading Aloud&lt;br&gt;&lt;small&gt;Jorge Proen\u00e7a; Carla Lopes; Michael Tjalve; Andreas Stolcke; Sara Candeias; Fernando Perdig\u00e3o&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Automatic assessment of non-native prosody by measuring distances on prosodic label sequences&lt;br&gt;&lt;small&gt;David Escudero-Mancebo; C\u00e9sar Gonz\u00e1lez-Ferreras; Eva Estebas-Vilaplana; Lourdes Aguilar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Technologies and Systems for New Applications\"\n                data-category-ids=\"1066\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Recognition for Langauge Learning<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4305\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                
data-id=\"4305\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Glottal Source Modeling\"\n                data-time=\"16:00-18:00\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Jo\u00e3o Cabral; Thomas Drugman&lt;br&gt;&lt;br&gt;16.00-16.20 - A new cosine series antialiasing function and its application to aliasing-free glottal source models for speech and singing synthesis&lt;br&gt;&lt;small&gt;Hideki Kawahara; Ken-Ichi Sakakibara; Hideki Banno; Masanori Morise; Tomoki Toda; Toshio Irino&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Speaking style conversion from normal to Lombard speech using a glottal vocoder and Bayesian GMMs&lt;br&gt;&lt;small&gt;Ana Ram\u00edrez L\u00f3pez; Shreyas Seshadri; Lauri Juvela; Okko R\u00e4s\u00e4nen; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Reducing mismatch in training of DNN-based glottal excitation models in a statistical parametric text-to-speech system&lt;br&gt;&lt;small&gt;Lauri Juvela; Bajibabu Bollepalli; Junichi Yamagishi; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Semi Parametric Concatenative TTS with Instant Voice Modification Capabilities&lt;br&gt;&lt;small&gt;Alexander Sorin; Slava Shechtman; Asaf Rendel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Modeling laryngeal muscle activation noise for low-order physiological based speech synthesis&lt;br&gt;&lt;small&gt;Rodrigo Manriquez; Sean Peterson; Pavel Prado; Patricio Orio; Matias Za\u00f1artu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Direct Modelling of Magnitude and Phase Spectra for Statistical Parametric Speech Synthesis&lt;br&gt;&lt;small&gt;Felipe Espic; Cassia Valentini-Botinhao; 
Simon King&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Synthesis and Spoken Language Generation\"\n                data-category-ids=\"1059\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Glottal Source Modeling<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5251\">\n\n              <div class='time-header'>19:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5251\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Student Reception\"\n                data-time=\"19:00-00:30\"\n                data-room=\"K\u00e4gelbanan, S\u00f6dra teatern\"\n                data-room-id=\"1078\"\n                data-room-name=\"K\u00e4gelbanan, S\u00f6dra teatern\"\n                data-day=\"2\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;p&gt;&amp;nbsp;&lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Social event\"\n                data-category-ids=\"1067\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #C9EE91;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Student 
Reception<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">K\u00e4gelbanan, S\u00f6dra teatern<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"19:00\">19:00-00:30 - Tuesday 22 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          <div class='list_header' data-day='3' style='background-color: rgba(32, 132, 196, 0.83); color: rgba(255, 255, 255, 1);'>Wednesday 23 August<\/div>\n          <div class=\"box-wrapper\"\n              data-id=\"5247\">\n\n              <div class='time-header'>07:45<\/div>\n\n              <div class=\"box\"\n                data-id=\"5247\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Registration\"\n                data-time=\"07:45-17:00\"\n                data-room=\"S\u00f6dra Huset, House A\"\n                data-room-id=\"1112\"\n                data-room-name=\"S\u00f6dra Huset, House A\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"\"\n                data-category-ids=\"\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Registration<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">S\u00f6dra Huset, House A<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"07:45\">07:45-17:00 - Wednesday 23 August<\/i>\n               
<\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4327\">\n\n              <div class='time-header'>08:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4327\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Keynote 2: Catherine Pelachaud, Conversing with social agents that smile and laugh\"\n                data-time=\"08:30-09:30\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4327.html\"\n                data-speaker=\"Catherine Pelachaud\"\n                data-speakercell=\"Catherine Pelachaud\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Bj\u00f6rn Granstr\u00f6m&lt;br&gt;&lt;br&gt;\n                 The session will also be broadcasted (with two-way communication) to rooms A2 and C6.\n                &lt;br&gt;&lt;br&gt;\"\n                data-category=\"Keynote\"\n                data-category-ids=\"1057\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #72D9EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Keynote 2: Catherine Pelachaud, Conversing with social agents that smile and laugh<\/span>\n               <br>\n               <div class=\"lecturer\"><span>Catherine Pelachaud<\/span><\/div>\n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"08:30\">08:30-09:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n  
        <div class=\"box-wrapper\"\n              data-id=\"5261\">\n\n              <div class='time-header'>09:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"5261\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Refreshments\"\n                data-time=\"09:30-10:00\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Refreshments<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:30\">09:30-10:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4331\">\n\n              <div class='time-header'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4331\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Dialog and Prosody\"\n                data-time=\"10:00-12:00\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n     
           data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Julia Hirschberg; Rolf Carlson&lt;br&gt;&lt;br&gt;10.00-10.20 - Prosodic Event Recognition using Convolutional Neural Networks with Context Information&lt;br&gt;&lt;small&gt;Sabrina Stehwien; Ngoc Thang Vu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Prosodic Facilitation and Interference while Judging on the Veracity of Synthesized Statements&lt;br&gt;&lt;small&gt;Ramiro H. Galvez; \u0160tefan Be\u0148u\u0161; Agustin Gravano; Marian Trnka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - An investigation of pitch matching across adjacent turns in a corpus of spontaneous German&lt;br&gt;&lt;small&gt;Margaret Zellers; Antje Schweitzer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - The Relationship between F0 Synchrony and Speech Convergence in Dyadic Interaction&lt;br&gt;&lt;small&gt;Sankar Mukherjee; Alessandro D'Ausilio; No\u00ebl Nguyen; Luciano Fadiga; Leonardo Badino&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - The role of linguistic and prosodic cues on the prediction of self-reported satisfaction in contact centre phone calls&lt;br&gt;&lt;small&gt;Jordi Luque; Ariadna S\u00e1nchez; Carlos Segura; Mart\u00ed Umbert; Luis \u00c1ngel Galindo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Cross-linguistic study of the production of turn-taking cues in American English and Argentine Spanish&lt;br&gt;&lt;small&gt;Pablo Brusco; Agustin Gravano; Juan Manuel P\u00e9rez&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n                data-category-ids=\"1051\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Dialog and 
Prosody<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4332\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4332\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Social Signals, Styles, and Interaction\"\n                data-time=\"10:00-12:00\"\n                data-room=\"D8\"\n                data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Khiet Truong; Nigel Ward&lt;br&gt;&lt;br&gt;10.00-10.20 - Emotional Features for Speech Overlaps Classification&lt;br&gt;&lt;small&gt;Olga Egorow; Andreas Wendemuth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Computing Multimodal Dyadic Behaviors during Spontaneous Diagnosis Interviews toward Automatic Categorization of Autism Spectrum Disorder&lt;br&gt;&lt;small&gt;Chin-Po Chen; Xian-Hong Tseng; Susan Shur-Fen Gau; Chi-Chun Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Deriving Dyad-Level Interaction Representation using Interlocutors Structural and Expressive Multimodal Behavior Features&lt;br&gt;&lt;small&gt;Yun-Shao Lin; Chi-Chun Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Spotting Social Signals in Conversational Speech over IP: A Deep Learning Perspective&lt;br&gt;&lt;small&gt;Raymond Brueckner; 
Maximilian Schmitt; Maja Pantic; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Optimized Time Series Filters for Detecting Laughter and Filler Events&lt;br&gt;&lt;small&gt;G\u00e1bor Gosztolya&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Visual, Laughter, Applause and Spoken Expression Features for Predicting Engagement within TED Talks.&lt;br&gt;&lt;small&gt;Fasih Haider; Fahim A. Salim; Saturnino Luz; Carl Vogel; Owen Conlan; Nick Campbell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Social Signals, Styles, and Interaction<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4345\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4345\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speaker and Language Recognition Applications\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Poster 2\"\n                data-room-id=\"1067\"\n                data-room-name=\"Poster 2\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                
data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mitchell McLaren&lt;br&gt;&lt;br&gt;10.00-12.00 - Acoustic Pairing of Original and Dubbed Voices in the Context of Video Game Localization&lt;br&gt;&lt;small&gt;Adrien Gresse; Mickael Rouvier; Richard Dufour; Vincent Labatut; Jean-Francois Bonastre&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - A Generative Model for Score Normalization in Speaker Recognition&lt;br&gt;&lt;small&gt;Albert Swart; Niko Brummer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Bidirectional Modelling for Short Duration Language Identification&lt;br&gt;&lt;small&gt;Sarith Fernando; Vidhyasaharan Sethu; Eliathamby Ambikairajah; Julien Epps&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Calibration Approaches for Language Detection&lt;br&gt;&lt;small&gt;Mitchell McLaren; Luciana Ferrer; Diego Castan; Aaron Lawson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Conditional Generative Adversarial Nets Classifier for Spoken Language Identification&lt;br&gt;&lt;small&gt;Peng Shen; Xugang Lu; Sheng Li; Hisashi Kawai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Domain Adaptation of PLDA models in Broadcast Diarization by means of Unsupervised Speaker Clustering&lt;br&gt;&lt;small&gt;Ignacio Vi\u00f1als; Alfonso Ortega; Jesus Villalba; Antonio Miguel; Eduardo Lleida Solano&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Homogeneity Measure Impact on Target and Non-target Trials in Forensic Voice Comparison&lt;br&gt;&lt;small&gt;Moez Ajili; Jean-Francois Bonastre; Waad Ben Kheder; Solange Rossato; Juliette Kahn&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - IITG-Indigo System for NIST 2016 SRE Challenge&lt;br&gt;&lt;small&gt;Nagendra Kumar; Rohan Kumar Das; Sarfaraz Jelil; Dhanush B K; Harish Kashyap; Sri Rama Murty Kodukula; Sriram Ganapathy; Rohit Sinha; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Locally Weighted Linear Discriminant Analysis for Robust 
Speaker Verification&lt;br&gt;&lt;small&gt;Abhinav Misra; Shivesh Ranjan; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - LSTM Neural Network-based Speaker Segmentation using Acoustic and Language Modelling&lt;br&gt;&lt;small&gt;Miquel Angel India Massana; Jos\u00e9 A. R. Fonollosa; Javier Hernando&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Null-Hypothesis LLR: A proposal for Forensic Automatic Speaker Recognition&lt;br&gt;&lt;small&gt;Yosef A. Solewicz; Michael Jessen; David van der Vloed&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Recursive Whitening Transformation for Speaker Recognition on Language Mismatched Condition&lt;br&gt;&lt;small&gt;Suwon Shon; Seongkyu Mun; Hanseok Ko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Speaker Clustering by Iteratively Finding Discriminative Feature Space and Cluster Labels&lt;br&gt;&lt;small&gt;Sungrack Yun; Hye Jin Jang; Taesu Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - The Opensesame NIST 2016 Speaker Recognition Evaluation System&lt;br&gt;&lt;small&gt;Gang Liu; Qi Qian; Zhibin Wang; Qingen Zhao; Tianzhou Wang; Hao Li; Jian Xue; Shenghuo Zhu; Rong Jin; Tuo Zhao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Tied Hidden Factors in Neural Networks for End-to-End Speaker Recognition&lt;br&gt;&lt;small&gt;Antonio Miguel; Jorge Llombart; Alfonso Ortega; Eduardo Lleida Solano&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speaker and Language Recognition Applications<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 2<\/i>\n               <\/div>\n\n    
           <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4359\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4359\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Data Collection, Transcription and Annotation Issues in Child Language Acquisition\"\n                data-time=\"10:00-12:00\"\n                data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Elika Bergelson; Sho Tsuji&lt;br&gt;&lt;br&gt;10.00-10.20 - Top-down versus bottom-up theories of phonological acquisition: A big data approach&lt;br&gt;&lt;small&gt;Christina Bergmann; Sho Tsuji; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - What do babies hear? Analyses of child- and adult-directed speech&lt;br&gt;&lt;small&gt;Marisa  Casillas; Andrei Amatuni; Amanda Seidl; Melanie Soderstrom; Anne Warlaumont; Elika Bergelson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - The LENA system applied to Swedish: Reliability of the Adult Word Count estimate&lt;br&gt;&lt;small&gt;Iris-Corinna Schwarz; Noor Botros; Alekzandra Lord; Amelie Marcusson; Henrik Tidelius; Ellen Marklund&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Which acoustic and phonological factors shape infants' vowel discrimination? 
Exploiting natural variation in InPhonDB&lt;br&gt;&lt;small&gt;Sho Tsuji; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - A New Workflow for Semi-automatized Annotations: Tests with Long-Form Naturalistic Recordings of Children\u2019s Language Environments&lt;br&gt;&lt;small&gt;Marisa  Casillas; Elika Bergelson; Anne S. Warlaumont; Alejandrina Cristia; Melanie Soderstrom; Mark VanDam; Han Sloetjes&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - SLPAnnotator: Tools for implementing Sign Language Phonetic Annotation&lt;br&gt;&lt;small&gt;Kathleen Currie Hall; Scott Mackie; Michael Fry; Oksana Tkachman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Data Collection, Transcription and Annotation Issues in Child Language Acquisition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4346\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4346\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Spoken Document Processing\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Poster 3\"\n                
data-room-id=\"1069\"\n                data-room-name=\"Poster 3\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tanja Schultz&lt;br&gt;&lt;br&gt;10.00-12.00 - A relevance score estimation for spoken term detection based on RNN-generated pronunciation embeddings&lt;br&gt;&lt;small&gt;Jan \u0160vec; Josef V. Psutka; Lubo\u0161 \u0160m\u00eddl; Jan Trmal&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Automatic Alignment between Classroom Lecture Utterances and Slide Components&lt;br&gt;&lt;small&gt;Masatoshi Tsuchiya; Ryo Minamiguchi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Compensating Gender Variability in Query-by-Example Search on Speech Using Voice Conversion&lt;br&gt;&lt;small&gt;Paula Lopez-Otero; Laura Docio-Fernandez; Carmen Garcia-Mateo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Constructing Acoustic Distances between Subwords and States Obtained from a Deep Neural Network for Spoken Term Detection&lt;br&gt;&lt;small&gt;Daisuke Kaneko; Kazunori Kojima; Kazuyo Tanaka; Shi-wook Lee; Yoshiaki Itoh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Evaluating automatic topic segmentation as a segment retrieval task&lt;br&gt;&lt;small&gt;Abdessalam Bouchekif; Delphine Charlet; Geraldine Damnati; Nathalie Camelin; Yannick Est\u00e8ve&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Exploring the Use of Significant Words Language Modeling for Spoken Document Retrieval&lt;br&gt;&lt;small&gt;Ying-Wen Chen; Kuan-Yu Chen; Hsin-Min Wang; Berlin Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Fast and Accurate OOV Decoder on High-Level Features&lt;br&gt;&lt;small&gt;Yuri Khokhlov; Natalia Tomashenko; Ivan Medennikov; Aleksei Romanenko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Hierarchical Recurrent Neural Network for Story Segmentation&lt;br&gt;&lt;small&gt;Emiru 
Tsunoo; Peter Bell; Steve Renals&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Improving Speech Recognizers by Refining Broadcast Data with Inaccurate Subtitle Timestamps&lt;br&gt;&lt;small&gt;Jeong-Uk Bang; Mu-Yeol Choi; Sang-Hun Kim; Oh-Wook Kwon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Incorporating Acoustic Features for Spontaneous Speech driven Content Retrieval&lt;br&gt;&lt;small&gt;Hiroto Tasaki; Tomoyosi Akiba&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Order-Preserving Abstractive Summarization for Spoken Content Based on Connectionist Temporal Classification&lt;br&gt;&lt;small&gt;Bo Ru Lu; Frank Shyu; Yun-Nung Chen; Hung-yi Lee; Lin-shan Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Query-by-Example Search with Discriminative Neural Acoustic Word Embeddings&lt;br&gt;&lt;small&gt;Shane Settle; Keith Levin; Herman Kamper; Karen Livescu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Zero-Shot Learning across Heterogenous Overlapping Domains&lt;br&gt;&lt;small&gt;Anjishnu Kumar; Pavankumar Muddireddy; Markus Dreyer; Bjorn Hoffmeister&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n                data-category-ids=\"1053\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Spoken Document Processing<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n    
      <div class=\"box-wrapper\"\n              data-id=\"4360\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4360\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Digital Revolution for Under-resourced Languages 1\"\n                data-time=\"10:00-12:00\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Alexey Karpov; Kristiina Jokinen&lt;br&gt;&lt;br&gt;10.00-10.20 - Team ELISA System for DARPA LORELEI Speech Evaluation 2016&lt;br&gt;&lt;small&gt;Pavlos Papadopoulos; Ruchir Travadi; Colin Vaz; Nikolaos Malandrakis; Ulf Hermjakob; Nima Pourdamghani; Michael Pust; Boliang Zhang; Xiaoman Pan; Di Lu; Ying Lin; Ondrej Glembek; Murali Karthick B; Martin Karafiat; Lukas Burget; Mark Hasegawa-Johnson; Heng Ji; Jonathan May; Kevin Knight; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - First Results in Developing a Medieval Latin Language Charter Dictation System for the East-Central Europe Region&lt;br&gt;&lt;small&gt;Peter Mihajlik; Lili Szabo; Balazs Tarjan; Andras Balog; Krisztina Rabai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - The motivation and development of MPAi, a M\u0101ori Pronunication Aid.&lt;br&gt;&lt;small&gt;Catherine Watson; Peter Keegan; Margaret Maclagan; Ray Harlow; Jeanette King&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - On the Linguistic Relevance of Speech Units Learned by Unsupervised Acoustic Modeling&lt;br&gt;&lt;small&gt;Siyuan Feng; Tan Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Deep Autoencoder Based Multi-task Learning Using Probabilistic 
Transcriptions&lt;br&gt;&lt;small&gt;Amit Das; Mark Hasegawa-Johnson; Karel Vesely&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Areal and Phylogenetic Features for Multilingual Speech Synthesis&lt;br&gt;&lt;small&gt;Alexander Gutkin; Richard Sproat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n                data-category-ids=\"1053\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Digital Revolution for Under-resourced Languages 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4344\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4344\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Recognition: Technologies for New Applications and Paradigms\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Poster 1\"\n                data-room-id=\"1061\"\n                data-room-name=\"Poster 1\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kris 
Demuynck&lt;br&gt;&lt;br&gt;10.00-12.00 - A Mostly Data-driven Approach to Inverse Text Normalization&lt;br&gt;&lt;small&gt;Ernest Pusateri; Bharat Ambati; Elizabeth Brooks; Ondrej Platek; Donald McAllaster; Venki Nagesha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Automatic Evaluation of Children Reading Aloud on Sentences and Pseudowords&lt;br&gt;&lt;small&gt;Jorge Proen\u00e7a; Carla Lopes; Michael Tjalve; Andreas Stolcke; Sara Candeias; Fernando Perdig\u00e3o&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Automatic Explanation Spot Estimation Method Targeted at Text and Figures in Lecture Slides&lt;br&gt;&lt;small&gt;Shoko Tsujimura; Kazumasa Yamamoto; Seiichi Nakagawa&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Comparison of Non-parametric Bayesian Mixture Models for Syllable Clustering and Zero-Resource Speech Processing&lt;br&gt;&lt;small&gt;Shreyas Seshadri; Ulpu Remes; Okko R\u00e4s\u00e4nen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Developing On-Line Speaker Diarization System&lt;br&gt;&lt;small&gt;Dimitrios Dimitriadis; Petr Fousek&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Distilling Knowledge from an Ensemble of Models for Punctuation Prediction&lt;br&gt;&lt;small&gt;Jiangyan Yi; Jianhua Tao; Zhengqi Wen; Ya Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Experiments in Character-level Neural Network Models for Punctuation&lt;br&gt;&lt;small&gt;William Gale; Sarangarajan Parthasarathy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Improving Mispronunciation Detection for Non-Native Learners with Multisource Information and LSTM-Based Deep Models&lt;br&gt;&lt;small&gt;Wei Li; Nancy F Chen; Sabato Marco Siniscalchi; Chin-Hui Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Mismatched Crowdsourcing From Multiple Annotator Languages For Recognizing Zero-resourced Languages: A Nullspace Clustering Approach&lt;br&gt;&lt;small&gt;Wenda Chen; Mark Hasegawa-Johnson; Nancy Chen; Boon Pang 
Lim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Multi-Channel Apollo Mission Speech Transcript Calibration&lt;br&gt;&lt;small&gt;Lakshmish Kaushik; Abhijeet Sangwan; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Multiview Representation Learning via Deep CCA for Silent Speech Recognition&lt;br&gt;&lt;small&gt;Myungjong Kim; Beiming Cao; Ted Mau; Jun Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Off-topic Spoken Response Detection with Word Embeddings&lt;br&gt;&lt;small&gt;Su-Youn Yoon; Chong Min Lee; Ikkyu Choi; Xinhao Wang; Matthew Mulholland; Keelan Evanini&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Use of Graphemic Lexicons for Spoken Language Assessment&lt;br&gt;&lt;small&gt;Kate Knill; Mark Gales; Kostas Kyriakopoulos; Anton Ragni; Yu Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Technologies and Systems for New Applications\"\n                data-category-ids=\"1066\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Recognition: Technologies for New Applications and Paradigms<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 1<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4347\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4347\"\n                data-project=\"project_242_2017_01_12\"\n     
           data-title=\"Speech Intelligibility\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Poster 4\"\n                data-room-id=\"1068\"\n                data-room-name=\"Poster 4\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Prasanta Ghosh&lt;br&gt;&lt;br&gt;10.00-12.00 - Intelligibilities of Mandarin Chinese Sentences with Spectral \u201cHoles\u201d&lt;br&gt;&lt;small&gt;Yafan Chen; Yong Xu; Jun Yang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Listening in the dips: Comparing relevant features for speech recognition in humans and machines&lt;br&gt;&lt;small&gt;Constantin Spille; Bernd T. Meyer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - On the use of Band Importance Weighting in the Short-Time Objective Intelligibility Measure&lt;br&gt;&lt;small&gt;Asger Heidemann Andersen; Jan Mark de Haan; Zheng-Hua Tan; Jesper Jensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Predicting Automatic Speech Recognition Performance over Communication Channels from Instrumental Speech Quality and Intelligibility Scores&lt;br&gt;&lt;small&gt;Laura Fern\u00e1ndez Gallardo; Sebastian M\u00f6ller; John Beerends&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Predicting Speech Intelligibility Using a Gammachirp Envelope Distortion Index Based on the Signal-to-Distortion Ratio&lt;br&gt;&lt;small&gt;Katsuhiko Yamamoto; Toshio Irino; Toshie Matsui; Shoko Araki; Keisuke Kinoshita; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Speech intelligibility in cars: the effect of speaking style, noise and listener age&lt;br&gt;&lt;small&gt;Cassia Valentini-Botinhao; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - The effect of situation-specific non-speech acoustic cues on the intelligibility of speech in 
noise&lt;br&gt;&lt;small&gt;Lauren Ward; Ben Shirley; Yan Tang; William Davies&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Coding and Enhancement\"\n                data-category-ids=\"1060\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Intelligibility<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4328\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4328\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Production and Physiology\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Felicitas Kleber; Elizabeth Godoy&lt;br&gt;&lt;br&gt;10.00-10.20 - Aerodynamic features of French fricatives&lt;br&gt;&lt;small&gt;Rosario Signorello; Sergio Hassid; Didier Demolin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Inter-speaker variability: speaker normalisation and quantitative estimation of articulatory 
invariants in speech production for French&lt;br&gt;&lt;small&gt;Antoine Serrurier; Pierre Badin; Louis-Jean Boe; Laurent Lamalle; Christiane Neuschaefer-Rube&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Comparison of Basic Beatboxing Articulations between Expert and Novice Artists using Real-Time Magnetic Resonance Imaging&lt;br&gt;&lt;small&gt;Nimisha Patil; Timothy Greer; Reed Blaylock; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Speaker-specific Biomechanical Model-based Investigation of a Simple Speech Task based on Tagged-MRI&lt;br&gt;&lt;small&gt;Keyi Tang; Negar Mohaghegh Harandi; Jonghye Woo; Georges El Fakhri; Maureen Stone; Sidney Fels&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Sounds of the Human Vocal Tract&lt;br&gt;&lt;small&gt;Reed Blaylock; Nimisha Patil; Timothy Greer; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - A simulation study on the effect of glottal boundary conditions on vocal tract formants&lt;br&gt;&lt;small&gt;Yasufumi Uezu; Tokihiko Kaburagi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Production and Physiology<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4355\">\n\n              
<div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4355\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 5\"\n                data-time=\"10:00-12:00\"\n                data-room=\"E306\"\n                data-room-id=\"1070\"\n                data-room-name=\"E306\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"10.00-12.00 - A Thematicity-based Prosody Enrichment Tool for CTS&lt;br&gt;&lt;small&gt;Monica Dominguez; Mireia Farr\u00fas; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Creating a Voice for MiRo, the World\u2019s First Commercial Biomimetic Robot&lt;br&gt;&lt;small&gt;Roger Moore; Ben Mitchinson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - SIAK - A Game for Foreign Language Pronunciation Learning&lt;br&gt;&lt;small&gt;Reima Karhila; Sari Ylinen; Seppo Enarvi; Kalle Palom\u00e4ki; Aleksander Nikulin; Olli Rantula; Vertti Viitanen; Krupakar Dhinakaran; Anna-Riikka Smolander; Heini Kallio; Maria Uther; Katja Junttila; Perttu H\u00e4m\u00e4l\u00e4inen; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - TBT(Toolkit to Build TTS): A High Performance Framework to build Multiple Language HTS Voice&lt;br&gt;&lt;small&gt;Atish Ghone; Rachana Nerpagar; Pranaw Kumar; Arun Baby; Aswin Shanmugam; Sasikumar Mukundan; Hema Murthy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - WebSubDub - Experimental system for creating high-quality alternative audio track for TV broadcasting&lt;br&gt;&lt;small&gt;Martin Gr\u016fber; Jindrich Matousek; Zden\u011bk Hanzl\u00ed\u010dek; Jakub V\u00edt; Daniel Tihelka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Voice Conservation and TTS System for People Facing Total Laryngectomy&lt;br&gt;&lt;small&gt;Mark\u00e9ta 
J\u016fzov\u00e1; Daniel Tihelka; Jindrich Matousek; Zdenek Hanzlicek&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 5<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E306<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4329\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4329\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Acoustic Model Adaptation\"\n                data-time=\"10:00-12:00\"\n                data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Catherine Breslin; George Saon&lt;br&gt;&lt;br&gt;10.00-10.20 - Large-Scale Domain Adaptation via Teacher-Student Learning&lt;br&gt;&lt;small&gt;Jinyu Li; Michael Seltzer; Xi Wang; Rui Zhao; Yifan Gong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Improving Children's Speech Recognition through Explicit Pitch Scaling based on Iterative Spectrogram Inversion&lt;br&gt;&lt;small&gt;Waquar Ahmad; Syed 
Shahnawazuddin; Hemant Kumar Kathania; Gayadhar Pradhan; A. B. Samaddar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - RNN-LDA Clustering for Feature Based DNN Adaptation&lt;br&gt;&lt;small&gt;Xurong Xie; Xunying Liu; Tan Lee; Lan Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Robust online i-vectors for unsupervised adaptation of DNN acoustic models: A study in the context of digital voice assistants&lt;br&gt;&lt;small&gt;Harish Arsikere; Sri Garimella&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Semi-supervised Learning with Semantic Knowledge Extraction for Improved Speech Recognition in Air Traffic Control&lt;br&gt;&lt;small&gt;Ajay Srinivasamurthy; Petr Motlicek; Ivan Himawan; Gyorgy Szaszak; Youssef Oualil; Hartmut Helmke&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Dynamic Layer Normalization for Adaptive Neural Acoustic Modeling in Speech Recognition&lt;br&gt;&lt;small&gt;Taesup Kim; Inchul Song; Yoshua Bengio&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Acoustic Model Adaptation<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4330\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n        
      <div class=\"box\"\n                data-id=\"4330\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech and Harmonic Analysis\"\n                data-time=\"10:00-12:00\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Abeer Alwan; Franz Pernkopf&lt;br&gt;&lt;br&gt;10.00-10.20 - A robust and alternative approach to zero frequency filtering method for epoch extraction&lt;br&gt;&lt;small&gt;Gangamohan Paidi; Bayya Yegnanarayana&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Improving YANGsaf F0 Estimator with Adaptive Kalman Filter&lt;br&gt;&lt;small&gt;Kanru Hua&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - A Spectro-Temporal Demodulation Technique for Pitch Estimation&lt;br&gt;&lt;small&gt;Jitendra Dhiman; Nagaraj Adiga; Chandra Sekhar Seelamantula&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Robust method for estimating F0 of complex tone based on pitch perception of amplitude modulated signal&lt;br&gt;&lt;small&gt;Kenichiro Miwa; Masashi Unoki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Low-Complexity Pitch Estimation Based on Phase Differences Between Low-Resolution Spectra&lt;br&gt;&lt;small&gt;Simon Graf; Tobias Herbig; Markus Buck; Gerhard Schmidt&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Harvest: A high-performance fundamental frequency estimator from speech signals&lt;br&gt;&lt;small&gt;Masanori Morise&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Speech and Audio Signals\"\n                data-category-ids=\"1062\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 
255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech and Harmonic Analysis<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4356\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4356\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 6\"\n                data-time=\"10:00-12:00\"\n                data-room=\"E397\"\n                data-room-id=\"1071\"\n                data-room-name=\"E397\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"10.00-12.00 - A Robust Medical Speech-to-Speech\/Speech-to-Sign Phraselator&lt;br&gt;&lt;small&gt;Farhia Ahmed; Pierrette Bouillon; Chelle Destefano; Johanna Gerlach; Sonia Halimi; Angela Hooper; Manny Rayner; Herv\u00e9 Spechbach; Irene Strasly; Nikos Tsourakis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Integrating the Talkamatic Dialogue Manager with Alexa&lt;br&gt;&lt;small&gt;Staffan Larsson; Fredrik Kronlid; Andreas Krona; Alex Berman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Modifying Amazon\u2019s Alexa ASR Grammar and Lexicon \u2013 A Case Study&lt;br&gt;&lt;small&gt;Aman Kumar; Hassan Alam; Manan Vyas; Tina Werner; Rachmat Hartono&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Nora the Empathetic 
Psychologist&lt;br&gt;&lt;small&gt;Genta Indra Winata; Onno Kampman; Yang Yang; Anik Dey; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Towards an Autarkic Embedded Cognitive User Interface&lt;br&gt;&lt;small&gt;Frank Duckhorn; Markus Huber; Werner Meyer; Oliver Jokisch; Constanze Tsch\u00f6pe; Matthias Wolff&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 6<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E397<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5253\">\n\n              <div class='time-header'>12:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5253\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Lunch\"\n                data-time=\"12:00-13:30\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div 
class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Lunch<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"12:00\">12:00-13:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4348\">\n\n              <div class='time-header'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4348\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Articulatory and Acoustic Phonetics\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Poster 2\"\n                data-room-id=\"1067\"\n                data-room-name=\"Poster 2\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mattias Heldner&lt;br&gt;&lt;br&gt;13.30-15.30 - Acoustic cues to the singleton-geminate contrast: the case of Libyan Arabic sonorants&lt;br&gt;&lt;small&gt;Amel Issa&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An ultrasound study of alveolar and retroflex consonants in Arrernte: stressed and unstressed syllables&lt;br&gt;&lt;small&gt;Marija Tabain; Richard Beare&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Preliminary Phonetic Investigation of Alphabetic Words in Mandarin Chinese&lt;br&gt;&lt;small&gt;Hongwei Ding; Yuanyuan Zhang; Hongchao Liu; Chu-Ren Huang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Quantitative Measure of the Impact of Coarticulation on 
Phone Discriminability&lt;br&gt;&lt;small&gt;Thomas Schatz; Rory Turnbull; Francis Bach; Emmanuel Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Effect of formant and F0 discontinuity on perceived vowel duration: Impacts for concatenative speech synthesis&lt;br&gt;&lt;small&gt;Tom\u00e1\u0161 Bo\u0159il; Pavel \u0160turm; Radek Skarnitzl; Jan Vol\u00edn&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Kinematic signatures of prosody in Lombard speech&lt;br&gt;&lt;small&gt;\u0160tefan Be\u0148u\u0161; Juraj \u0160imko; Mona Lehtinen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Locating burst onsets using SFF envelope and phase information&lt;br&gt;&lt;small&gt;Bhanu Teja Nellore; RaviShankar Prasad; Sudarsana Reddy Kadiri; Suryakanth V Gangashetty; Bayya Yegnanarayana&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Mel-cepstral distortion of German vowels in different information density contexts&lt;br&gt;&lt;small&gt;Erika Brandt; Frank Zimmerer; Bistra Andreeva; Bernd M\u00f6bius&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Mental Representation of Japanese Mora: focusing on intrinsic duration&lt;br&gt;&lt;small&gt;Kosuke Sugai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Reshaping the transformed LF model: generating the glottal source from the waveshape parameter Rd&lt;br&gt;&lt;small&gt;Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Temporal Dynamics of Lateral Channel Formation in \/l\/: 3D EMA Data from Australian English&lt;br&gt;&lt;small&gt;Jia Ying; Christopher Carignan; Jason Shaw; Michael Proctor; Donald Derrick; Catherine Best&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - What do Finnish and Central Bavarian have in common? 
Towards an acoustically based quantity typology&lt;br&gt;&lt;small&gt;Markus Jochim; Felicitas Kleber&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Vowel and Consonant Sequences in three Bavarian varieties in Austria&lt;br&gt;&lt;small&gt;Nicola Klingler; Sylvia Moosm\u00fcller; Hannes Scheutz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Articulatory and Acoustic Phonetics<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4361\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4361\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Digital Revolution for Under-resourced Languages 2\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Poster 1\"\n                data-room-id=\"1061\"\n                data-room-name=\"Poster 1\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Shyam Agrawal; Oddur 
Kjartansson&lt;br&gt;&lt;br&gt;13.30-15.30 - Building an ASR corpus using Althingi's Parliamentary Speeches&lt;br&gt;&lt;small&gt;Inga R\u00fan Helgad\u00f3ttir; R\u00f3bert Kjaran; Anna Bj\u00f6rk Nikul\u00e1sd\u00f3ttir; Jon Gudnason&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Building ASR corpora using Eyra&lt;br&gt;&lt;small&gt;Jon Gudnason; Matth\u00edas P\u00e9tursson; R\u00f3bert Kjaran; Simon Kluepfel; Anna Nikul\u00e1sd\u00f3ttir&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Eliciting meaningful units from speech&lt;br&gt;&lt;small&gt;Daniil Kocharov; Tatiana Kachkovskaia; Pavel Skrelin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Extracting Situation Frames from non-English Speech: Evaluation Framework and Pilot Results&lt;br&gt;&lt;small&gt;Nikolaos Malandrakis; Ondrej Glembek; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Implementation of a Radiology Speech Recognition System for Estonian using Open Source Software&lt;br&gt;&lt;small&gt;Tanel Alum\u00e4e; Andrus Paats; Ivo Fridolin; Einar Meister&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Improving DNN Bluetooth Narrowband Acoustic Models by Cross-bandwidth and Cross-lingual Initialization&lt;br&gt;&lt;small&gt;Xiaodan Zhuang; Arnab Ghoshal; Antti-Veikko Rosti; Matthias Paulik; Daben Liu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Joint Estimation of Articulatory Features and Acoustic models for Low-Resource Languages&lt;br&gt;&lt;small&gt;Basil Abraham; Srinivasan Umesh; Neethu Mariam Joy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Leveraging Text Data for Word Segmentation for Underresourced Languages&lt;br&gt;&lt;small&gt;Thomas Glarner; Benedikt Boenninghoff; Oliver Walter; Reinhold Haeb-Umbach&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Machine Assisted Analysis of Vowel Length Contrasts in Wolof&lt;br&gt;&lt;small&gt;Elodie Gauthier; Laurent Besacier; Sylvie Voisin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Nativization of foreign names in TTS 
for automatic reading of world news in Swahili&lt;br&gt;&lt;small&gt;Joseph Mendelson; Pilar Oplustil; Oliver Watts; Simon King&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 1&lt;br&gt;&lt;small&gt;Claudia Soria&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 2&lt;br&gt;&lt;small&gt;Alexey Karpov&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 3&lt;br&gt;&lt;small&gt;Emmanuel Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 4&lt;br&gt;&lt;small&gt;Mary Harper&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 5&lt;br&gt;&lt;small&gt;Sebastian Stueker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 6&lt;br&gt;&lt;small&gt;Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 7&lt;br&gt;&lt;small&gt;Linne Ha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Rapid development of TTS corpora for four South African languages&lt;br&gt;&lt;small&gt;Daniel Van Niekerk; Charl Van Heerden; Marelie Davel; Neil Kleynhans; Oddur Kjartansson; Martin Jansche; Linne Ha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - The ABAIR initiative: Bringing Spoken Irish into the Digital Space&lt;br&gt;&lt;small&gt;Ailbhe N\u00ed Chasaide; Neasa N\u00ed Chiar\u00e1in; Christoph Wendler; Harald Berthelsen; Andy Murphy; Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Transfer Learning and Distillation Techniques to Improve the Acoustic Modeling of Low Resource Languages&lt;br&gt;&lt;small&gt;Basil Abraham; Tejaswi Seeram; Srinivasan Umesh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Uniform Multilingual Multi-Speaker Acoustic Model for Statistical Parametric Speech Synthesis of Low-Resourced Languages&lt;br&gt;&lt;small&gt;Alexander Gutkin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Unsupervised Speech Signal to Symbol Transformation for Zero Resource Speech Applications&lt;br&gt;&lt;small&gt;Saurabhchand Bhati; Shekhar Nayak; Sri Rama Murty 
Kodukula&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Very low resource radio browsing for agile developmental and humanitarian monitoring&lt;br&gt;&lt;small&gt;Armin Saeb; Raghav Menon; Hugh Cameron; William Kibira; John Quinn; Thomas Niesler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n                data-category-ids=\"1053\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Digital Revolution for Under-resourced Languages 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 1<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4350\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4350\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Disorders Related to Speech and Language\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Poster 4\"\n                data-room-id=\"1068\"\n                data-room-name=\"Poster 4\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Jan 
Rusz&lt;br&gt;&lt;br&gt;13.30-15.30 - Acoustic evaluation of nasality in cerebellar syndromes&lt;br&gt;&lt;small&gt;Michal Novotn\u00fd; Jan Rusz; Karel Sp\u00e1lenka; Ji\u0159\u00ed Klemp\u00ed\u0159; Dana Hor\u00e1kov\u00e1; Ev\u017een R\u016f\u017ei\u010dka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An Affect Prediction Approach through Depression Severity Parameter Incorporation in Neural Networks&lt;br&gt;&lt;small&gt;Rahul Gupta; Saurabh Sahu; Carol Espy-Wilson; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An avatar-based system for identifying individuals likely to develop dementia&lt;br&gt;&lt;small&gt;Bahman Mirheidari; Daniel Blackburn; Kirsty Harkness; Traci Walker; Annalena Venneri; Markus Reuber; Heidi Christensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An N-Gram Based Approach to the Automatic Diagnosis of Alzheimer's Disease from Spoken Language&lt;br&gt;&lt;small&gt;Sebastian Wankerl; Elmar Noeth; Stefan Evert&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Cross-Database Models for the Classification of Dysarthria Presence&lt;br&gt;&lt;small&gt;Stephanie Gillespie; Yash-Yee Logan; Elliot Moore; Jacqueline Laures-Gore; Scott Russell; Rupal Patel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Cross-Domain Classification of Drowsiness in Speech: The Case of Alcohol Intoxication and Sleep Deprivation&lt;br&gt;&lt;small&gt;Yue Zhang; Felix Weninger; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Depression Detection Using Automatic Transcriptions of De-Identified Speech&lt;br&gt;&lt;small&gt;Paula Lopez-Otero; Laura Docio-Fernandez; Alberto Abad; Carmen Garcia-Mateo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Emotional Speech of Mentally and Physically Disabled Individuals: Introducing The EmotAsS Database and First Findings&lt;br&gt;&lt;small&gt;Simone Hantke; Hesam Sagha; Nicholas Cummins; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Exploiting Intra-annotator Rating 
Consistency through Copeland's Method for Estimation of Ground Truth Labels in Couples' Therapy&lt;br&gt;&lt;small&gt;Karel Mundnich; Md Nasir; Panayiotis Georgiou; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Float Like a Butterfly Sting Like a Bee: Changes in Speech Preceded Parkinsonism Diagnosis for Muhammad Ali&lt;br&gt;&lt;small&gt;Visar Berisha; Julie Liss; Timothy Huston; Alan Wisler; Yishan Jiao; Jonathan Eig&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Manual and Automatic Transcriptions in Dementia Detection from Speech&lt;br&gt;&lt;small&gt;Jochen Weiner; Mathis Engelbart; Tanja Schultz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Phonological markers of Oxytocin and MDMA ingestion&lt;br&gt;&lt;small&gt;Carla Agurto; Raquel Norel; Rachel Ostrand; Gillinder Bedi; Harriet de Wit; Matthew J. Baggott; Matthew G. Kirkpatrick; Margaret Wardle; Guillermo Cecchi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Rhythmic Characteristics of Parkinsonian Speech: A Study on Mandarin and Polish&lt;br&gt;&lt;small&gt;Massimo Pettorino; Wentao Gu; Pawe\u0142 P\u00f3\u0142rola; Ping Fan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Disorders Related to Speech and Language<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n    
      \n          <div class=\"box-wrapper\"\n              data-id=\"4335\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4335\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Noise Robust Speech Recognition\"\n                data-time=\"13:30-15:30\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Yifan Gong; Izhak Shafran&lt;br&gt;&lt;br&gt;13.30-13.50 - Speech Representation Learning Using Unsupervised Data-Driven Modulation Filtering for Robust ASR&lt;br&gt;&lt;small&gt;Purvi Agrawal; Sriram Ganapathy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Combined Multi-channel NMF-based Robust Beamforming for Noisy Speech Recognition&lt;br&gt;&lt;small&gt;Masato Mimura; Yoshiaki Bando; Kazuki Shimada; Shinsuke Sakai; Kazuyoshi Yoshii; Tatsuya Kawahara&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Recognizing Multi-talker Speech with Permutation Invariant Training&lt;br&gt;&lt;small&gt;Dong Yu; Xuankai Chang; Yanmin Qian&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Coupled initialization of multi-channel non-negative matrix factorization based on spatial and spectral information&lt;br&gt;&lt;small&gt;Yuuki Tachioka; Tomohiro Narita; Iori Miura; Takanobu Uramoto; Natsuki Monta; Shingo Uenohara; Ken'ichi Furuya; Shinji Watanabe; Jonathan Le Roux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Channel Compensation in the Generalised Vector Taylor Series Approach to Robust ASR&lt;br&gt;&lt;small&gt;Erfan Loweimi; Jon Barker; Thomas Hain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Robust Speech Recognition Via Anchor Word 
Representations&lt;br&gt;&lt;small&gt;Brian King; I-Fan Chen; Yonatan Vaizman; Yuzong Liu; Roland Maas; SHK (Hari) Parthasarathi; Bjorn Hoffmeister&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Noise Robust Speech Recognition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4349\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4349\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Music and Audio Processing\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Poster 3\"\n                data-room-id=\"1069\"\n                data-room-name=\"Poster 3\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Unto Laine; Rohit Sinha&lt;br&gt;&lt;br&gt;13.30-15.30 - Acoustic Scene Classification using a CNN-SuperVector system trained with Auditory and Spectrogram Image Features&lt;br&gt;&lt;small&gt;Rakib Hyder; Shabnam 
Ghaffarzadegan; Zhe Feng; John H.L. Hansen; Taufiq Hasan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Domain Knowledge-Assisted Nonlinear Model for Head-Related Transfer Functions Based on Bottleneck Deep Neural Network&lt;br&gt;&lt;small&gt;Xiaoke Qi; Jianhua Tao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An audio based piano performance evaluation method using deep neural network based acoustic modeling&lt;br&gt;&lt;small&gt;Jing Pan; Ming Li; Zhanmei Song; Xin Li; Xiaolin Liu; Hua Yi; Manman Zhu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - AN ENVIRONMENTAL FEATURE REPRESENTATION FOR ROBUST SPEECH RECOGNITION AND FOR ENVIRONMENT IDENTIFICATION&lt;br&gt;&lt;small&gt;Xue Feng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Note Based Query By Humming System using Convolutional Neural Network&lt;br&gt;&lt;small&gt;Naziba Mostafa; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Transfer Learning Based Feature Extractor for Polyphonic Sound Event Detection Using Connectionist Temporal Classification&lt;br&gt;&lt;small&gt;Yun Wang; Florian Metze&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Attention and Localization based on a Deep Convolutional Recurrent Model for Weakly Supervised Audio Tagging&lt;br&gt;&lt;small&gt;Yong Xu; Qiuqiang Kong; Qiang Huang; Wenwu Wang; Mark D. 
Plumbley&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Audio Scene Classification with Deep Recurrent Neural Networks&lt;br&gt;&lt;small&gt;Huy Phan; Philipp Koch; Fabrice Katzberg; Marco Maass; Radoslaw Mazur; Alfred Mertins&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Automatic time-frequency analysis of echolocation signals using the matched Gaussian multitaper spectrogram&lt;br&gt;&lt;small&gt;Maria Sandsten; Isabella Reinhold; Josefin Starkhammar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Classification-Based Detection of Glottal Closure Instants from Speech Signals&lt;br&gt;&lt;small&gt;Jindrich Matousek; Daniel Tihelka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Laryngeal Articulation during Trumpet Performance: An Exploratory Study&lt;br&gt;&lt;small&gt;Luis M.T. Jesus; Bruno Rocha; Andreia Hall&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Matrix of Polynomials Model based Polynomial Dictionary Learning Method for Acoustic Impulse Response Modeling&lt;br&gt;&lt;small&gt;Jian Guan; Xuan Wang; Pengming Feng; Jing Dong; Wenwu Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Music Tempo Estimation Using Sub-band Synchrony&lt;br&gt;&lt;small&gt;Shreyan Chowdhury; Tanaya Guha; Rajesh Hegde&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Novel Shifted Real Spectrum for Exact Signal Reconstruction&lt;br&gt;&lt;small&gt;Meet Soni; Rishabh Tak; Hemant Patil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Sinusoidal Partials Tracking for Singing Analysis Using the Heuristic of the Minimal Frequency and Magnitude Difference&lt;br&gt;&lt;small&gt;Kin Wah Edward Lin; Hans Anderson; Clifford So; Simon Lui&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Unsupervised Filterbank Learning Using Convolutional Restricted Boltzmann Machine for Environmental Sound Classification&lt;br&gt;&lt;small&gt;Hardik Sailor; Dharmesh Agrawal; Hemant Patil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Speech and Audio Signals\"\n      
          data-category-ids=\"1062\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Music and Audio Processing<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4334\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4334\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Language Recognition\"\n                data-time=\"13:30-15:30\"\n                data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Yao Qian; Vidhyasaharan Sethu&lt;br&gt;&lt;br&gt;13.30-13.50 - Spoken Language Identification using LSTM-based Angular Proximity&lt;br&gt;&lt;small&gt;Gregory Gelly; Jean-Luc Gauvain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - End-to-End Language Identification Using High-Order Utterance Representation with Bilinear Pooling&lt;br&gt;&lt;small&gt;Ma Jin; Yan Song; Ian McLoughlin; Wu Guo; Lirong Dai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Dialect Recognition Based on Unsupervised Bottleneck 
Features&lt;br&gt;&lt;small&gt;Qian Zhang; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Investigating Scalability in Hierarchical Language Identification System&lt;br&gt;&lt;small&gt;Saad Irtza; Vidhyasaharan Sethu; Eliathamby Ambikairajah; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Improving Sub-phone Modeling for Better Native Language Identification with Non-native English Speech&lt;br&gt;&lt;small&gt;Yao Qian; Keelan Evanini; Xinhao Wang; David Suendermann-Oeft; Robert A Pugh; Patrick L Lange; Hillary R Molloy; Frank K Soong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - QMDIS: QCRI-MIT Advanced Dialect Identification System&lt;br&gt;&lt;small&gt;Sameer Khurana; Maryam Najafian; Ahmed Ali; Tuka Al Hanai; Yonatan Belinkov; James Glass&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Language Recognition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4357\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4357\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 5\"\n                data-time=\"13:30-15:30\"\n    
            data-room=\"E306\"\n                data-room-id=\"1070\"\n                data-room-name=\"E306\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"13.30-15.30 - A Thematicity-based Prosody Enrichment Tool for CTS&lt;br&gt;&lt;small&gt;Monica Dominguez; Mireia Farr\u00fas; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Creating a Voice for MiRo, the World\u2019s First Commercial Biomimetic Robot&lt;br&gt;&lt;small&gt;Roger Moore; Ben Mitchinson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - SIAK - A Game for Foreign Language Pronunciation Learning&lt;br&gt;&lt;small&gt;Reima Karhila; Sari Ylinen; Seppo Enarvi; Kalle Palom\u00e4ki; Aleksander Nikulin; Olli Rantula; Vertti Viitanen; Krupakar Dhinakaran; Anna-Riikka Smolander; Heini Kallio; Maria Uther; Katja Junttila; Perttu H\u00e4m\u00e4l\u00e4inen; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - TBT(Toolkit to Build TTS): A High Performance Framework to build Multiple Language HTS Voice&lt;br&gt;&lt;small&gt;Atish Ghone; Rachana Nerpagar; Pranaw Kumar; Arun Baby; Aswin Shanmugam; Sasikumar Mukundan; Hema Murthy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - WebSubDub - Experimental system for creating high-quality alternative audio track for TV broadcasting&lt;br&gt;&lt;small&gt;Martin Gr\u016fber; Jindrich Matousek; Zden\u011bk Hanzl\u00ed\u010dek; Jakub V\u00edt; Daniel Tihelka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Voice Conservation and TTS System for People Facing Total Laryngectomy&lt;br&gt;&lt;small&gt;Mark\u00e9ta J\u016fzov\u00e1; Daniel Tihelka; Jindrich Matousek; Zdenek Hanzlicek&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div 
class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 5<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E306<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4321\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4321\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speaker Comparison for Forensic and Investigative Applications 3\"\n                data-time=\"13:30-15:30\"\n                data-room=\"B3\"\n                data-room-id=\"1072\"\n                data-room-name=\"B3\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4321.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Special event\"\n                data-category-ids=\"1064\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speaker Comparison for Forensic and Investigative Applications 3<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" 
data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4338\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4338\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Lexical and Pronunciation Modeling\"\n                data-time=\"13:30-15:30\"\n                data-room=\"D8\"\n                data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Izhak Shafran; Helen Meng&lt;br&gt;&lt;br&gt;13.30-13.50 - Multitask Sequence-to-Sequence Models for Grapheme-to-Phoneme Conversion&lt;br&gt;&lt;small&gt;Benjamin Milde; Christoph Schmidt; Joachim K\u00f6hler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Acoustic data-driven lexicon learning based on a greedy pronunciation selection framework&lt;br&gt;&lt;small&gt;Xiaohui Zhang; Vimal Manohar; Dan Povey; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Semi-Supervised Learning of a Pronunciation Dictionary from Disjoint Phonemic Transcripts and Text&lt;br&gt;&lt;small&gt;Takahiro Shinozaki; Shinji Watanabe; Daichi Mochihashi; Graham Neubig&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Improved subword modeling for WFST-based speech recognition&lt;br&gt;&lt;small&gt;Peter Smit; Sami Virpioja; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Pronunciation learning with RNN-transducers&lt;br&gt;&lt;small&gt;Antoine Bruguier; Danushen Gnanapragasam; Leif Johnson; Kanishka Rao; Francoise Beaufays&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Learning 
Similarity Functions for Pronunciation Variations&lt;br&gt;&lt;small&gt;Einat Naaman; Yossi Adi; Joseph Keshet&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Architecture, Search, and Linguistic Components\"\n                data-category-ids=\"1061\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Lexical and Pronunciation Modeling<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4362\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4362\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Computational Models in Child Language Acquisition\"\n                data-time=\"13:30-15:30\"\n                data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Alejandrina Cristia; Kristina Nilsson Bj\u00f6rkenstam&lt;br&gt;&lt;br&gt;13.30-13.50 - Relating unsupervised word segmentation to reported vocabulary acquisition&lt;br&gt;&lt;small&gt;Elin Larsen; Alejandrina Cristia; Emmanuel 
Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Approximating phonotactic input in children\u2019s linguistic environments from orthographic transcripts&lt;br&gt;&lt;small&gt;Sofia Str\u00f6mbergsson; Jens Edlund; Jana G\u00f6tze; Kristina Nilsson Bj\u00f6rkenstam&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Computational simulations of temporal vocalization behavior in adult-child interaction&lt;br&gt;&lt;small&gt;Ellen Marklund; David Pagmar; Tove Gerholm; Lisa Gustavsson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Modelling the Informativeness of Non-Verbal Cues in Parent\u2013Child Interaction&lt;br&gt;&lt;small&gt;Mats Wir\u00e9n; Kristina Nilsson Bj\u00f6rkenstam; Robert \u00d6stling&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Learning weakly-supervised multimodal phoneme embeddings&lt;br&gt;&lt;small&gt;Rahma Chaabouni; Ewan Dunbar; Neil Zeghidour; Emmanuel Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Multi-Task Learning for Mispronunciation Detection on Singapore Children\u2019s Mandarin Speech&lt;br&gt;&lt;small&gt;Rong Tong; Nancy Chen; Bin Ma&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Computational Models in Child Language Acquisition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n    
      <div class=\"box-wrapper\"\n              data-id=\"4336\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4336\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Topic Spotting, Entity Extraction and Semantic Analysis\"\n                data-time=\"13:30-15:30\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Ville Hautamaki; Lin-shan Lee&lt;br&gt;&lt;br&gt;13.30-13.50 - Towards Zero-Shot Frame Semantic Parsing for Domain Scaling&lt;br&gt;&lt;small&gt;Ankur Bapna; Gokhan Tur; Dilek Hakkani-Tur; Larry Heck&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - ClockWork-RNN based architectures for Slot Filling&lt;br&gt;&lt;small&gt;Despoina Georgiadou; Vassilios Diakoloukas; Vassilios Tsiaras; Vassilios Digalakis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Investigating the Effect of ASR tuning on Named Entity Recognition&lt;br&gt;&lt;small&gt;Mohamed Ben Jannet; Olivier Galibert; Martine Adda-Decker; Sophie Rosset&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Label-dependency coding in Simple Recurrent Networks for Spoken Language Understanding&lt;br&gt;&lt;small&gt;Marco Dinarelli; Vedran Vukotic; Christian Raymond&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Minimum Semantic Error Cost Training of Deep Long Short-Term Memory Networks for Topic Spotting on Conversational Speech&lt;br&gt;&lt;small&gt;Zhong Meng; Biing-Hwang (Fred) Juang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Topic Identification for Speech without ASR&lt;br&gt;&lt;small&gt;Chunxi Liu; Jan Trmal; Matthew Wiesner; Craig Harman; Sanjeev 
Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n                data-category-ids=\"1053\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Topic Spotting, Entity Extraction and Semantic Analysis<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4337\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4337\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Dialog Systems\"\n                data-time=\"13:30-15:30\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Gabriel Skantze; Timo Baumann&lt;br&gt;&lt;br&gt;13.30-13.50 - An End-to-End Trainable Neural Network Model with Belief Tracking for Task-Oriented Dialog&lt;br&gt;&lt;small&gt;Bing Liu; Ian Lane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Deep Reinforcement Learning of Dialogue Policies with Less Weight 
Updates&lt;br&gt;&lt;small&gt;Heriberto Cuayahuitl; Seunghak Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Towards End-to-End Spoken Dialogue Systems with Turn Embeddings&lt;br&gt;&lt;small&gt;Ali Orkan Bayer; Evgeny Stepanov; Giuseppe Riccardi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Speech and Text Analysis for Multimodal Addressee Detection in Human-Human-Computer Interaction&lt;br&gt;&lt;small&gt;Oleg Akhtiamov; Maxim Sidorov; Alexey Karpov; Wolfgang Minker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Rushing to Judgement: How Do Laypeople Rate Caller Engagement in Thin-Slice Videos of Human--Machine Dialog?&lt;br&gt;&lt;small&gt;Vikram Ramanarayanan; Chee Wee (Ben) Leong; David Suendermann-Oeft&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Hyperarticulation of Corrections in Multilingual Dialogue Systems&lt;br&gt;&lt;small&gt;Ivan Kraljevski; Diane Hirschfeld&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n                data-category-ids=\"1051\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Dialog Systems<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4358\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4358\"\n                
data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 6\"\n                data-time=\"13:30-15:30\"\n                data-room=\"E397\"\n                data-room-id=\"1071\"\n                data-room-name=\"E397\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"13.30-15.30 - A Robust Medical Speech-to-Speech\/Speech-to-Sign Phraselator&lt;br&gt;&lt;small&gt;Farhia Ahmed; Pierrette Bouillon; Chelle Destefano; Johanna Gerlach; Sonia Halimi; Angela Hooper; Manny Rayner; Herv\u00e9 Spechbach; Irene Strasly; Nikos Tsourakis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Integrating the Talkamatic Dialogue Manager with Alexa&lt;br&gt;&lt;small&gt;Staffan Larsson; Fredrik Kronlid; Andreas Krona; Alex Berman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Modifying Amazon\u2019s Alexa ASR Grammar and Lexicon \u2013 A Case Study&lt;br&gt;&lt;small&gt;Aman Kumar; Hassan Alam; Manan Vyas; Tina Werner; Rachmat Hartono&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Nora the Empathetic Psychologist&lt;br&gt;&lt;small&gt;Genta Indra Winata; Onno Kampman; Yang Yang; Anik Dey; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Towards an Autarkic Embedded Cognitive User Interface&lt;br&gt;&lt;small&gt;Frank Duckhorn; Markus Huber; Werner Meyer; Oliver Jokisch; Constanze Tsch\u00f6pe; Matthias Wolff&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 6<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i 
class=\"list-room\">E397<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4333\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4333\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Cognition and Brain Studies\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Odette Scharenborg; Isabel Trancoso&lt;br&gt;&lt;br&gt;13.30-13.50 - An entrained rhythm's frequency, not phase, influences temporal sampling of speech&lt;br&gt;&lt;small&gt;Hans Rutger Bosker; Anne K\u00f6sem&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Context regularity indexed by auditory N1 and P2 event-related potentials&lt;br&gt;&lt;small&gt;Xiao Wang; Yanhui Zhang; Gang Peng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Discovering Language in Marmoset Vocalization&lt;br&gt;&lt;small&gt;Sakshi Verma; Lok Prateek Kotha; Karthik Pandia D S; Nauman Dawalatabad; Rogier Landman; Jitendra Sharma; Mriganka Sur; Hema Murthy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Subject-independent Classification of Japanese Spoken Sentences by Multiple Frequency Bands Phase Pattern of EEG Response during Speech Perception&lt;br&gt;&lt;small&gt;Hiroki Watanabe; Hiroki Tanaka; Sakriani Sakti; Satoshi 
Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - The phonological status of the French Initial Accent and its role in semantic processing: an Event-Related Potentials study&lt;br&gt;&lt;small&gt;Noemie te Rietmolen; Radouane El Yagoubi; Alain Ghio; Corine Ast\u00e9sano&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - A Neuro-Experimental Evidence for the Motor Theory of Speech Perception&lt;br&gt;&lt;small&gt;Bin Zhao; Jianwu Dang; Gaoyan Zhang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Perception, Production and Acquisition\"\n                data-category-ids=\"1055\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Cognition and Brain Studies<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5259\">\n\n              <div class='time-header'>15:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"5259\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Refreshments\"\n                data-time=\"15:30-16:00\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                
data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Refreshments<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"15:30\">15:30-16:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4342\">\n\n              <div class='time-header'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4342\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Multi-channel Speech Enhancement\"\n                data-time=\"16:00-18:00\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hynek Boril; Reinhold Haeb-Umbach&lt;br&gt;&lt;br&gt;16.00-16.20 - Tight integration of spatial and spectral features for BSS with Deep Clustering embeddings&lt;br&gt;&lt;small&gt;Lukas Drude; Reinhold Haeb-Umbach&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Speaker-aware neural network based beamformer for speaker extraction in speech mixtures&lt;br&gt;&lt;small&gt;Katerina Zmolikova; Marc Delcroix; Keisuke Kinoshita; Takuya Higuchi; Atsunori Ogawa; Tomohiro 
Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Eigenvector-based Speech Mask Estimation using Logistic Regression&lt;br&gt;&lt;small&gt;Lukas Pfeifenberger; Matthias Z\u00f6hrer; Franz Pernkopf&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Real-time Speech Enhancement with GCC-NMF&lt;br&gt;&lt;small&gt;Sean Wood; Jean Rouat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Coherence-based dual-channel noise reduction algorithm in a complex noisy environment&lt;br&gt;&lt;small&gt;Youna Ji; Jun Byun; Young-cheol Park&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Glottal Model Based Speech Beamforming for Ad-Hoc Microphone Arrays&lt;br&gt;&lt;small&gt;Yang Zhang; Dinei Florencio; Mark Hasegawa-Johnson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Coding and Enhancement\"\n                data-category-ids=\"1060\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Multi-channel Speech Enhancement<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4354\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4354\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Voice Conversion 2\"\n                data-time=\"16:00-18:00\"\n                data-room=\"Poster 4\"\n                
data-room-id=\"1068\"\n                data-room-name=\"Poster 4\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Chandra Sekhar Seelamantula&lt;br&gt;&lt;br&gt;16.00-18.00 - CAB: An Energy-Based Speaker Clustering Model for Rapid Adaptation in Non-Parallel Voice Conversion&lt;br&gt;&lt;small&gt;Toru Nakashika&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Denoising Recurrent Neural Network for Deep Bidirectional LSTM based Voice Conversion&lt;br&gt;&lt;small&gt;Jie Wu; Dongyan Huang; Lei Xie; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Emotional Voice Conversion with Adaptive Scales F0 based on Wavelet Transform using Limited Amount of Emotional Data&lt;br&gt;&lt;small&gt;Zhaojie Luo; Jinhui Chen; Tetsuya Takiguchi; Yasuo Ariki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Generative adversarial network-based glottal waveform model for statistical parametric speech synthesis&lt;br&gt;&lt;small&gt;Bajibabu Bollepalli; Lauri Juvela; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Generative Adversarial Network-based Postfilter for STFT Spectrograms&lt;br&gt;&lt;small&gt;Takuhiro Kaneko; Shinji Takaki; Hirokazu Kameoka; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Phoneme-Discriminative Features for Dysarthric Speech Conversion&lt;br&gt;&lt;small&gt;Ryo Aihara; Tetsuya Takiguchi; Yasuo Ariki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Segment Level Voice Conversion with Recurrent Neural Networks&lt;br&gt;&lt;small&gt;Miguel Ramos; Alan W Black; Ram\u00f3n Astudillo; Isabel Trancoso; Nuno Fonseca&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speaker adaptation in DNN-based speech synthesis using d-vectors&lt;br&gt;&lt;small&gt;Rama Sanand Doddipatla; Norbert Braunschweiler; Ranniery 
Maia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speaker Dependent Approach for Enhancing a Glossectomy Patient's Speech via GMM-based Voice Conversion&lt;br&gt;&lt;small&gt;Kei Tanaka; Sunao Hara; Masanobu Abe; Masaaki Sato; Shogo Minagi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Spectro-Temporal Modelling with Time-Frequency LSTM and Structured Output Layer for Voice Conversion&lt;br&gt;&lt;small&gt;Runnan Li; Zhiyong Wu; Yishuang Ning; Lifa Sun; Helen Meng; Lianhong Cai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Voice Conversion from Unaligned Corpora Using Variational Autoencoding Wasserstein Generative Adversarial Networks&lt;br&gt;&lt;small&gt;Chin-Cheng Hsu; Hsin-Te Hwang; YICHIAO WU; Yu Tsao; Hsin-Min Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Synthesis and Spoken Language Generation\"\n                data-category-ids=\"1059\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Voice Conversion 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4340\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4340\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Language models for ASR\"\n                data-time=\"16:00-18:00\"\n                
data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Yannick Est\u00e8ve; Dilek Hakkani-T\u00fcr&lt;br&gt;&lt;br&gt;16.00-16.20 - Effectively Building Tera Scale MaxEnt Language Models Incorporating Non-Linguistic Signals&lt;br&gt;&lt;small&gt;Fadi Biadsy; Mohammadreza Ghodsi; Diamantino Caseiro&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Semi-supervised Adaptation of RNNLMs by Fine-tuning with Domain-specific Auxiliary Features&lt;br&gt;&lt;small&gt;Salil Deena; Raymond W. M. Ng; Pranava Madhyastha; Lucia Specia; Thomas Hain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Approximated and domain-adapted LSTM language models for first-pass decoding in speech recognition&lt;br&gt;&lt;small&gt;Mittul Singh; Youssef Oualil; Dietrich Klakow&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Sparse Non-negative Matrix Language Modeling: Maximum Entropy Flexibility on the Cheap&lt;br&gt;&lt;small&gt;Ciprian Chelba; Diamantino Caseiro; Fadi Biadsy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Multi-scale Context Adaptation for Improving Child Automatic Speech Recognition in Child-Adult Spoken Interactions&lt;br&gt;&lt;small&gt;Manoj Kumar; Daniel Bone; Kelly McWilliams; Shanna Williams; Thomas Lyon; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Using Knowledge Graph And Search Query Click Logs in Statistical Language Model For Speech Recognition&lt;br&gt;&lt;small&gt;Weiwu Zhu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Architecture, Search, and Linguistic Components\"\n                data-category-ids=\"1061\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" 
style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Language models for ASR<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4339\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4339\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speaker Database and Anti-spoofing\"\n                data-time=\"16:00-18:00\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Nicholas Evans; Karthika Vijayan&lt;br&gt;&lt;br&gt;16.00-16.20 - Detection of Replay Attacks using Single Frequency Filtering Cepstral Coefficients&lt;br&gt;&lt;small&gt;K N R K Raju Alluri; Sivanand Achanta; Sudarsana Reddy Kadiri; Suryakanth V Gangashetty; Anil Kumar Vuppala&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Unsupervised Representation Learning Using Convolutional Restricted Boltzmann Machine for Spoof Speech Detection&lt;br&gt;&lt;small&gt;Hardik Sailor; Madhu Kamble; Hemant Patil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Independent Modelling of High and Low Energy Speech Frames for Spoofing Detection&lt;br&gt;&lt;small&gt;Gajan 
Suthokumar; Kaavya Sriskandaraja; Vidhyasaharan Sethu; Chamith Wijenayake; Eliathamby Ambikairajah&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Improving Speaker Verification Performance in Presence of Spoofing Attacks Using Out-of-Domain Spoofed Data&lt;br&gt;&lt;small&gt;Achintya Sarkar; Md Sahidullah; Zheng-Hua Tan; Tomi Kinnunen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - VoxCeleb: A large-scale speaker identification dataset&lt;br&gt;&lt;small&gt;Arsha Nagrani; Joon Son Chung; Andrew Zisserman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Call My Net Corpus: A Multilingual Corpus for Evaluation of Speaker Recognition Technology&lt;br&gt;&lt;small&gt;Karen Jones; Stephanie Strassel; Kevin Walker; David Graff; Jonathan Wright&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speaker Database and Anti-spoofing<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4353\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4353\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Language Understanding and Generation\"\n                
data-time=\"16:00-18:00\"\n                data-room=\"Poster 3\"\n                data-room-id=\"1069\"\n                data-room-name=\"Poster 3\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Jose David Lopes; Heriberto Cuayahuitl&lt;br&gt;&lt;br&gt;16.00-18.00 - ASR error management for improving spoken language understanding&lt;br&gt;&lt;small&gt;Edwin Simonnet; Sahar Ghannay; Nathalie Camelin; Yannick Est\u00e8ve; Renato de Mori&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Character-based Embedding Models and Reranking Strategies for Understanding Natural Language Meal Descriptions&lt;br&gt;&lt;small&gt;Mandy Korpusik; Zachary Collins; James Glass&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Internal Memory Gate for Recurrent Neural Networks with Application to Spoken Language Understanding&lt;br&gt;&lt;small&gt;Mohamed Morchid&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Jointly Trained Sequential Labeling and Classification by Sparse Attention Neural Networks&lt;br&gt;&lt;small&gt;Mingbo Ma; Kai Zhao; Liang Huang; Bing Xiang; Bowen Zhou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Online adaptation of an attention-based neural network for natural language generation&lt;br&gt;&lt;small&gt;Matthieu Riou; Bassam Jabaian; St\u00e9phane Huet; Fabrice Lefevre&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - OpenMM: An Open-source Multimodal Feature Extraction Tool&lt;br&gt;&lt;small&gt;Michelle Morales; Stefan Scherer; Rivka Levitan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Parallel Hierarchical Attention Networks with Shared Memory Reader for Multi-Stream Conversational Document Classification&lt;br&gt;&lt;small&gt;Naoki Sawada; Ryo Masumura; Hiromitsu Nishizaki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Quaternion Denoising Encoder-Decoder for Theme 
Identification of Telephone Conversations&lt;br&gt;&lt;small&gt;Titouan Parcollet; Mohamed Morchid; Georges Linares&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Spanish Sign Language Recognition with Different Topology Hidden Markov Models&lt;br&gt;&lt;small&gt;Carlos-D. Mart\u00ednez-Hinarejos; Zuzanna Parcheta&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speaker Dependency Analysis, Audiovisual Fusion Cues and A Multimodal BLSTM for Conversational Engagement Recognition&lt;br&gt;&lt;small&gt;Yuyun Huang; Emer Gilmartin; Nick Campbell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - To Plan or not to Plan? Discourse planning in slot-value informed sequence to sequence models for language generation&lt;br&gt;&lt;small&gt;Neha Nayak; Dilek Hakkani-Tur; Marilyn Walker; Larry Heck&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Zero-shot Learning for Natural Language Understanding using Domain-Independent Sequential Structure and Question Types&lt;br&gt;&lt;small&gt;Kugatsu Sadamitsu; Yukinori Homma; Ryuichiro Higashinaka; Yoshihiro Matsuo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n                data-category-ids=\"1051\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Language Understanding and Generation<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 3<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              
data-id=\"4341\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4341\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Translation\"\n                data-time=\"16:00-18:00\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Nicholas Ruiz; Roland Kuhn&lt;br&gt;&lt;br&gt;16.00-16.20 - Sequence-to-Sequence Models Can Directly Translate Foreign Speech&lt;br&gt;&lt;small&gt;Ron Weiss; Jan Chorowski; Navdeep Jaitly; Yonghui Wu; Zhifeng Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Structured-based Curriculum Learning for End-to-end English-Japanese Speech Translation&lt;br&gt;&lt;small&gt;Takatomo Kano; Sakriani Sakti; Satoshi Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Assessing the tolerance of Neural Machine Translation systems against Speech Recognition Errors&lt;br&gt;&lt;small&gt;Nicholas Ruiz; Mattia Antonino Di Gangi; Nicola Bertoldi; Marcello Federico&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Toward Expressive Speech Translation: A Unified Sequence-to-Sequence LSTMs Approach for Translating Words and Emphasis&lt;br&gt;&lt;small&gt;Quoc Truong Do; Sakriani Sakti; Satoshi Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - NMT-based Segmentation and Punctuation Insertion for Real-time Spoken Language Translation&lt;br&gt;&lt;small&gt;Eunah Cho; Jan Niehues; Alex Waibel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n                data-category-ids=\"1053\"\n             
   data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Translation<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4343\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4343\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Recognition: Applications in Medical Practice\"\n                data-time=\"16:00-18:00\"\n                data-room=\"D8\"\n                data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Phil Green; Torbj\u00f8rn Svendsen&lt;br&gt;&lt;br&gt;16.00-16.20 - Acoustic Assessment of Disordered Voice with Continuous Speech Based on Utterance-level ASR Posterior Features&lt;br&gt;&lt;small&gt;Yuanyuan Liu; Tan Lee; P.C. Ching; Thomas K.T. Law; Kathy Y.S. 
Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Multi-Stage DNN Training for Automatic Recognition of Dysarthric Speech&lt;br&gt;&lt;small&gt;Emre Yilmaz; Mario Ganzeboom; Catia Cucchiarini; Helmer Strik&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Improving child speech disorder assessment by incorporating out-of-domain adult speech&lt;br&gt;&lt;small&gt;Daniel Smith; Alex Sneddon; Lauren Ward; Andreas Duenser; Jill Freyne; David Silvera-Tawil; Angela Morgan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - On Improving Acoustic Models For TORGO Dysarthric Speech Database&lt;br&gt;&lt;small&gt;Neethu Mariam Joy; Srinivasan Umesh; Basil Abraham&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Glottal Source Features for Automatic Speech-based Depression Assessment&lt;br&gt;&lt;small&gt;Olympia Simantiraki; Paulos Charonyktakis; Anastasia Pampouchidou; Manolis Tsiknakis; Martin Cooke&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Speech Processing Approach for Diagnosing Dementia in an Early Stage&lt;br&gt;&lt;small&gt;Roozbeh Sadeghian; J. 
David Schaffer; Stephen Zahorian&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Technologies and Systems for New Applications\"\n                data-category-ids=\"1066\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Recognition: Applications in Medical Practice<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4352\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4352\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speaker States and Traits\"\n                data-time=\"16:00-18:00\"\n                data-room=\"Poster 2\"\n                data-room-id=\"1067\"\n                data-room-name=\"Poster 2\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Emily Provost&lt;br&gt;&lt;br&gt;16.00-18.00 - An Investigation of Emotion Dynamics and Kalman Filtering for Speech-based Emotion Prediction&lt;br&gt;&lt;small&gt;Zhaocheng Huang; Julien Epps&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Approaching Human Performance in Behavior Estimation in Couples Therapy Using Deep 
Sentence Embeddings&lt;br&gt;&lt;small&gt;Shao-Yen Tseng; Brian Baucom; Panayiotis Georgiou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Attention Networks for Modeling Behavior in Addiction Counseling&lt;br&gt;&lt;small&gt;James Gibson; Dogan Can; Panayiotis Georgiou; David Atkins; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Big Five vs. Prosodic Features as Cues to Detect Abnormality in SSPNET-Personality Corpus&lt;br&gt;&lt;small&gt;C\u00e9dric Fayet; Arnaud Delhay; Damien Lolive; Pierre-Francois Marteau&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Bilingual Word Embeddings for Cross-Lingual Personality Recognition Using Convolutional Neural Nets&lt;br&gt;&lt;small&gt;Farhad Bin Siddique; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Complexity in speech and its relation to emotional bond in therapist-patient interactions during suicide risk assessment interviews&lt;br&gt;&lt;small&gt;Md Nasir; Brian Baucom; Craig J. Bryan; Shrikanth Narayanan; Panayiotis Georgiou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Computational Analysis of Acoustic Descriptors in Psychotic Patients&lt;br&gt;&lt;small&gt;Torsten W\u00f6rtwein; Tadas Baltru\u0161aitis; Eugene Laksana; Luciana Pennant; Elizabeth Liebson; Dost \u00d6ng\u00fcr; Justin Baker; Louis-Philippe Morency&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Emotion category mapping to emotional space by cross-corpus emotion labeling&lt;br&gt;&lt;small&gt;Yoshiko Arimoto; Hiroki Mori&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Implementing gender-dependent vowel-level analysis for boosting speech-based depression recognition&lt;br&gt;&lt;small&gt;Bogdan Vlasenko; Hesam Sagha; Nicholas Cummins; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Modeling Perceivers Neural-Responses using Lobe-dependent Convolutional Neural Network to Improve Speech Emotion Recognition&lt;br&gt;&lt;small&gt;Ya-Tse Wu; Hsuan-Yu Chen; Yu-Hsien Liao; Li-Wei Kuo; Chi-Chun 
Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speech Rate Comparison when Talking to a System and Talking to a Human: A study from a Speech-to-Speech, Machine Translation mediated Map Task&lt;br&gt;&lt;small&gt;Akira Hayakawa; Carl Vogel; Saturnino Luz; Nick Campbell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The Perception of Emotions in Noisified Nonsense Speech&lt;br&gt;&lt;small&gt;Emilia Parada-Cabaleiro; Alice Baird; Anton Batliner; Nicholas Cummins; Simone Hantke; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speaker States and Traits<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4363\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4363\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Voice Attractiveness\"\n                data-time=\"16:00-18:00\"\n                data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                
data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"16.00-16.10 - Introduction&lt;br&gt;&lt;small&gt;Melissa Barkat-Defradas; John Ohala&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - A gender bias in the acoustic-melodic features of charismatic speech?&lt;br&gt;&lt;small&gt;Eszter Novak-Tot; Oliver Niebuhr; Aoju Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Attractiveness of French voices for German listeners - results from native and non-native read speech&lt;br&gt;&lt;small&gt;Juergen Trouvain; Frank Zimmerer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Does Posh English Sound Attractive?&lt;br&gt;&lt;small&gt;Li Jiao; Chengxia Wang; Cristiane Hsu; Peter Birkholz; Yi Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Large-scale Speaker Ranking from Crowdsourced Pairwise Listener Ratings&lt;br&gt;&lt;small&gt;Timo Baumann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Perceptual Ratings of Voice Likability Collected through In-Lab Listening Tests vs. Mobile-Based Crowdsourcing&lt;br&gt;&lt;small&gt;Laura Fern\u00e1ndez Gallardo; Rafael Zequeira Jim\u00e9nez; Sebastian M\u00f6ller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Personalized Quantification of Voice Attractiveness in Multidimensional Merit Space&lt;br&gt;&lt;small&gt;Yasunari Obuchi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Pitch convergence as an effect of perceived attractiveness and likability&lt;br&gt;&lt;small&gt;Jan Michalsky; Heike Schoormann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Social Attractiveness in Dialogs&lt;br&gt;&lt;small&gt;Antje Schweitzer; Natalie Lewandowski; Daniel Duran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - The role of temporal amplitude modulations in the political arena: Hillary Clinton vs. 
Donald Trump&lt;br&gt;&lt;small&gt;Hans Rutger Bosker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Voice Attractiveness<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4364\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4364\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Digital Revolution for Under-resourced Languages 3\"\n                data-time=\"16:00-18:00\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4364.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Joseph Mariani; Sakriani Sakti&lt;br&gt;&lt;br&gt;&lt;p&gt;Panel discussion with panelists:&lt;\/p&gt;&lt;p&gt;&lt;\/p&gt;&lt;ul&gt;&lt;li&gt;Claudia Soria, SIG 
ELRA-LRL&lt;br&gt;&lt;\/li&gt;&lt;li&gt;Alexey Karpov, SLTU Board&lt;br&gt;&lt;\/li&gt;&lt;li&gt;Emmanuel Dupoux, Zero Resource Speech Challenge&lt;br&gt;&lt;\/li&gt;&lt;li&gt;Mary Harper, BABEL Program&lt;br&gt;&lt;\/li&gt;&lt;li&gt;Sebastian Stueker, BULB Project&lt;\/li&gt;&lt;li&gt;Sanjeev Khudanpur, Johns Hopkins Summer Workshops&lt;\/li&gt;&lt;li&gt;Linne Ha, Google Research&lt;\/li&gt;&lt;li&gt;Irmgarda Kasinkaite-Buddeberg, UNESCO&lt;\/li&gt;&lt;li&gt;Marja-Liisa Olthuis, North-Europe Representative&lt;\/li&gt;&lt;li&gt;Ulrike Janke, South-Africa Representative&lt;\/li&gt;&lt;li&gt;Shyam Agrawal, South-Asia Representative&lt;\/li&gt;&lt;\/ul&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n                data-category-ids=\"1053\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Digital Revolution for Under-resourced Languages 3<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4351\">\n\n              <div class='time-header' style='visibility: hidden;'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4351\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Prosody\"\n                data-time=\"16:00-18:00\"\n                data-room=\"Poster 
1\"\n                data-room-id=\"1061\"\n                data-room-name=\"Poster 1\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Stefanie Jannedy&lt;br&gt;&lt;br&gt;16.00-18.00 - Canonical Correlation Analysis and Prediction of Perceived Rhythmic Prominences and Pitch Tones in Speech&lt;br&gt;&lt;small&gt;Elizabeth Godoy; James Williamson; Thomas Quatieri&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Creaky voice as a function of tonal categories and prosodic boundaries&lt;br&gt;&lt;small&gt;Jianjing Kuang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Evaluation of Spectral Tilt Measures for Sentence Prominence Under Different Noise Conditions&lt;br&gt;&lt;small&gt;Sofoklis Kakouros; Okko R\u00e4s\u00e4nen; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Exploring multidimensionality: Acoustic and articulatory correlates of Swedish word accents&lt;br&gt;&lt;small&gt;Malin Svensson Lundmark; Gilbert Ambrazaitis; Otto Ewald&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Focus Acoustics in Mandarin Nominals&lt;br&gt;&lt;small&gt;Yu-Yin Hsu; Anqi Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - How does the absence of shared knowledge between interlocutors affect the production of French prosodic forms?&lt;br&gt;&lt;small&gt;Amandine Michelas; C\u00e9cile Cau; Maud Champagne-Lavau&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Intonation of contrastive topic in Estonian&lt;br&gt;&lt;small&gt;Heete Sahkai; Meelis Mihkla&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Reanalyze Fundamental Frequency Peak Delay in Mandarin&lt;br&gt;&lt;small&gt;Lixia Hao; Wei Zhang; Yanlu Xie; Jinsong Zhang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The Acoustics of Word Stress in Czech as a Function of Speaking Style&lt;br&gt;&lt;small&gt;Radek Skarnitzl; Anders 
Eriksson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The Perception of English Intonation Patterns by German L2 speakers of English&lt;br&gt;&lt;small&gt;Karin Puga; Robert Fuchs; Jane Setter; Peggy Mok&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Three Dimensions of Sentence Prosody and their (Non-)Interactions&lt;br&gt;&lt;small&gt;Michael Wagner; Michael McAuliffe&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Trisyllabic tone 3 sandhi patterns in Mandarin produced by Cantonese speakers&lt;br&gt;&lt;small&gt;Jung-Yueh Tu; Janice Wing-Sze Wong; Jih-Ho Cha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Using Prosody to Classify Discourse Relations&lt;br&gt;&lt;small&gt;Janine Kleinhans; Mireia Farr\u00fas; Agustin Gravano; Juan Manuel P\u00e9rez; Catherine Lai; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - What You See Is What You Get Prosodically Less - Visibility Shapes Prosodic Prominence Production in Spontaneous Interaction&lt;br&gt;&lt;small&gt;Petra Wagner; Nataliya Bryhadyr&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Prosody<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 1<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-18:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5252\">\n\n              <div class='time-header'>19:00<\/div>\n\n              <div 
class=\"box\"\n                data-id=\"5252\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Banquet\"\n                data-time=\"19:00-23:00\"\n                data-room=\"Tekniska Museet and Etnografiska museet\"\n                data-room-id=\"1081\"\n                data-room-name=\"Tekniska Museet and Etnografiska museet\"\n                data-day=\"3\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Social event\"\n                data-category-ids=\"1067\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #C9EE91;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Banquet<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Tekniska Museet and Etnografiska museet<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"19:00\">19:00-23:00 - Wednesday 23 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          <div class='list_header' data-day='4' style='background-color: rgba(32, 132, 196, 0.83); color: rgba(255, 255, 255, 1);'>Thursday 24 August<\/div>\n          <div class=\"box-wrapper\"\n              data-id=\"5248\">\n\n              <div class='time-header'>07:45<\/div>\n\n              <div class=\"box\"\n                data-id=\"5248\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Registration\"\n                data-time=\"07:45-17:00\"\n                data-room=\"S\u00f6dra Huset, House A\"\n                data-room-id=\"1112\"\n                data-room-name=\"S\u00f6dra 
Huset, House A\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"\"\n                data-category-ids=\"\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Registration<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">S\u00f6dra Huset, House A<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"07:45\">07:45-17:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4269\">\n\n              <div class='time-header'>08:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4269\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Keynote 3: Bj\u00f6rn Lindblom, Re-inventing speech \u2013 the biological way\"\n                data-time=\"08:30-09:30\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4269.html\"\n                data-speaker=\"Bj\u00f6rn Lindblom\"\n                data-speakercell=\"Bj\u00f6rn Lindblom\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Olov Engwall&lt;br&gt;&lt;br&gt;\n                 &lt;p&gt;The session will also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;br&gt;&lt;\/p&gt;\n       
         &lt;br&gt;&lt;br&gt;\"\n                data-category=\"Keynote\"\n                data-category-ids=\"1057\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #72D9EE;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Keynote 3: Bj\u00f6rn Lindblom, Re-inventing speech \u2013 the biological way<\/span>\n               <br>\n               <div class=\"lecturer\"><span>Bj\u00f6rn Lindblom<\/span><\/div>\n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"08:30\">08:30-09:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5262\">\n\n              <div class='time-header'>09:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"5262\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Refreshments\"\n                data-time=\"09:30-10:00\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Refreshments<\/span>\n               <br>\n    
           \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"09:30\">09:30-10:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4276\">\n\n              <div class='time-header'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4276\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speaker Diarization\"\n                data-time=\"10:00-12:00\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Eduardo Lleida; Kai Yu&lt;br&gt;&lt;br&gt;10.00-10.20 - Speaker Diarization Using Convolutional Neural Network for Statistics Accumulation Refinement&lt;br&gt;&lt;small&gt;Zbynek Zajic; Marek Hruz; Ludek Muller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Speaker2Vec: Unsupervised Learning and Adaptation of a Speaker Manifold using Deep Neural Networks with an Evaluation on Speaker Segmentation&lt;br&gt;&lt;small&gt;Arindam Jati; Panayiotis Georgiou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - A Triplet Ranking-based Neural Network for Speaker Diarization and Linking&lt;br&gt;&lt;small&gt;Ga\u00ebl Le Lan; Delphine Charlet; Anthony Larcher; Sylvain Meignier&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Estimating Speaker Clustering Quality Using Logistic Regression&lt;br&gt;&lt;small&gt;Yishai Cohen; Itshak Lapidot&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Combining speaker turn embedding 
and incremental structure prediction for low-latency speaker diarization&lt;br&gt;&lt;small&gt;Guillaume Wisniewski; Herv\u00e9 Bredin; Gregory Gelly; Claude Barras&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - pyannote.metrics: a toolkit for reproducible evaluation, diagnostic, and error analysis of speaker diarization systems&lt;br&gt;&lt;small&gt;Herv\u00e9 Bredin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speaker Diarization<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4278\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4278\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Noise Reduction\"\n                data-time=\"10:00-12:00\"\n                data-room=\"C6\"\n                data-room-id=\"1066\"\n                data-room-name=\"C6\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Yan Huang; Tim Fingscheidt&lt;br&gt;&lt;br&gt;10.00-10.20 - Deep 
Recurrent Neural Network based Monaural Speech Separation using Recurrent Temporal Restricted Boltzmann Machines&lt;br&gt;&lt;small&gt;Suman Samui; Indrajit Chakrabarti; Soumya Kanti Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Improved Codebook-based Speech Enhancement based on MBE Model&lt;br&gt;&lt;small&gt;Qizheng Huang; Changchun Bao; Xianyun Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Improving mask learning based speech enhancement system with restoration layers and residual connection&lt;br&gt;&lt;small&gt;Zhuo Chen; Yan Huang; Jinyu Li; Yifan Gong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Exploring Low-Dimensional Structures of Modulation Spectra for Robust Speech Recognition&lt;br&gt;&lt;small&gt;Bi-Cheng Yan; Chin-Hong Shih; Shih-Hung Liu; Berlin Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - SEGAN: Speech Enhancement Generative Adversarial Network&lt;br&gt;&lt;small&gt;Santiago Pascual; Antonio Bonafonte; Joan Serr\u00e0&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Concatenative resynthesis using twin networks&lt;br&gt;&lt;small&gt;Soumi Maiti; Michael Mandel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Coding and Enhancement\"\n                data-category-ids=\"1060\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Noise Reduction<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">C6<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n             
 data-id=\"4277\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4277\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Spoken Term Detection\"\n                data-time=\"10:00-12:00\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Sanjeev Khudanpur; Murat Saraclar&lt;br&gt;&lt;br&gt;10.00-10.20 - A Rescoring Approach for Keyword Search Using Lattice Context Information&lt;br&gt;&lt;small&gt;Zhipeng Chen; Ji Wu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - The Kaldi OpenKWS System: Improving Low Resource Keyword Search&lt;br&gt;&lt;small&gt;Jan Trmal; Matthew Wiesner; Vijayaditya Peddinti; Xiaohui Zhang; Pegah Ghahremani; Vimal Manohar; Yiming Wang; Hainan Xu; Dan Povey; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - The STC Keyword Search System For OpenKWS 2016 Evaluation&lt;br&gt;&lt;small&gt;Yuri Khokhlov; Ivan Medennikov; Aleksei Romanenko; Valentin Mendelev; Maxim Korenevsky; Alexey Prudnikov; Natalia Tomashenko; Alexander Zatvornitskiy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Compressed time delay neural network for small-footprint keyword spotting&lt;br&gt;&lt;small&gt;Ming Sun; David Snyder; Yixin Gao; Varun Nagaraja; Mike Rodehorst; Sankaran Panchapagesan; Nikko Strom; Spyros Matsoukas; Shiv Vitaladevuni&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Symbol sequence search from telephone conversation&lt;br&gt;&lt;small&gt;Masayuki Suzuki; Gakuto Kurata; Abhinav Sethy; Bhuvana Ramabhadran; Kenneth Church; Mark Drake&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Similarity Learning Based Query 
Modeling for Keyword Search&lt;br&gt;&lt;small&gt;Batuhan Gundogdu; Murat Saraclar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n                data-category-ids=\"1053\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Spoken Term Detection<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4279\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4279\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Recognition: Multimodal Systems\"\n                data-time=\"10:00-12:00\"\n                data-room=\"D8\"\n                data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Patrick Wambacq; Florian Metze&lt;br&gt;&lt;br&gt;10.00-10.20 - Combining Residual Networks with LSTMs for Lipreading&lt;br&gt;&lt;small&gt;Themos Stafylakis; Georgios Tzimiropoulos&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Improving computer lipreading via DNN 
sequence discriminative training techniques&lt;br&gt;&lt;small&gt;Kwanchiva Thangthai; Richard Harvey&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Improving Speaker-Independent Lipreading with Domain-Adversarial Training&lt;br&gt;&lt;small&gt;Michael Wand; J\u00fcrgen Schmidhuber&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Turbo Decoders for Audio-visual Continuous Speech Recognition&lt;br&gt;&lt;small&gt;Ahmed Hussen Abdelaziz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - DNN-based Ultrasound-to-Speech Conversion for a Silent Speech Interface&lt;br&gt;&lt;small&gt;Tam\u00e1s G\u00e1bor Csap\u00f3; Tam\u00e1s Gr\u00f3sz; G\u00e1bor Gosztolya; L\u00e1szl\u00f3 T\u00f3th; Alexandra Mark\u00f3&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Visually grounded learning of keyword prediction from untranscribed speech&lt;br&gt;&lt;small&gt;Herman Kamper; Shane Settle; Gregory Shakhnarovich; Karen Livescu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Technologies and Systems for New Applications\"\n                data-category-ids=\"1066\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Recognition: Multimodal Systems<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4280\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n   
             data-id=\"4280\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Noise Robust and Far-field ASR\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Poster 1\"\n                data-room-id=\"1061\"\n                data-room-name=\"Poster 1\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Volker Leutnant\u00a0&lt;br&gt;&lt;br&gt;10.00-12.00 - Adaptive Multichannel Dereverberation for Automatic Speech Recognition&lt;br&gt;&lt;small&gt;Joe Caroselli; Izhak Shafran; Arun Narayanan; Richard Rose&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Attention-based LSTM with Multi-task Learning for Distant Speech Recognition&lt;br&gt;&lt;small&gt;Yu Zhang; Pengyuan Zhang; Yonghong Yan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - End-to-End Speech Recognition with Auditory Attention for Multi-Microphone Distance Speech Recognition&lt;br&gt;&lt;small&gt;Suyoun Kim; Ian Lane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Factored deep convolutional neural networks for noise robust speech recognition&lt;br&gt;&lt;small&gt;Masakiyo Fujimoto&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Global SNR Estimation of Speech Signals for Unknown Noise Conditions using Noise Adapted Non-linear Regression&lt;br&gt;&lt;small&gt;Pavlos Papadopoulos; Ruchir Travadi; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Improved Automatic Speech Recognition using Subband Temporal Envelope Features and Time-delay Neural Network Denoising Autoencoder&lt;br&gt;&lt;small&gt;Cong-Thanh Do; Yannis Stylianou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Joint Training of Multi-channel-condition Dereverberation and Acoustic Modeling of Microphone Array Speech for Robust Distant Speech Recognition&lt;br&gt;&lt;small&gt;Fengpei 
Ge; Kehuang Li; Bo Wu; Sabato Marco Siniscalchi; Yonghong Yan; Chin-Hui Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Robust Speech Recognition Based on Binaural Auditory Processing&lt;br&gt;&lt;small&gt;Anjali Menon; Chanwoo Kim; Richard Stern&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - To Improve the Robustness of LSTM-RNN Acoustic Models Using Higher-order Feedback From Multiple Histories&lt;br&gt;&lt;small&gt;Hengguan Huang; Brian Mak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Uncertainty decoding with adaptive sampling for noise robust DNN-based acoustic modeling&lt;br&gt;&lt;small&gt;Tien Dung Tran; Marc Delcroix; Atsunori Ogawa; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Noise Robust and Far-field ASR<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 1<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4287\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4287\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Interspeech 2017 Computational Paralinguistics ChallengE (ComParE) 1\"\n                
data-time=\"10:00-12:00\"\n                data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Bj\u00f6rn Schuller; Anton Batliner&lt;br&gt;&lt;br&gt;10.00-10.15 - The INTERSPEECH 2017 Computational Paralinguistics Challenge: Addressee, Cold &amp; Snoring&lt;br&gt;&lt;small&gt;Bj\u00f6rn Schuller; Stefan Steidl; Anton Batliner; Elika Bergelson; Jarek Krajewski; Christoph Janott; Andrei Amatuni; Marisa  Casillas; Amanda Seidl; Melanie Soderstrom; Anne Warlaumont; Guillermo Hidalgo; Sebastian Schnieder; Clemens Heiser; Winfried Hohenhorst; Michael Herzog; Maximilian Schmitt; Kun Qian; Yue Zhang; George Trigeorgis; Panagiotis Tzirakis; Stefanos Zafeiriou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.15-10.25 - Description of the UPPER RESPIRATORY TRACT INFECTION CORPUS (URTIC)&lt;br&gt;&lt;small&gt;Jarek Krajewski; Sebastian Schieder; Anton Batliner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.25-10.35 - Description of the Munich-Passau Snore Sound Corpus (MPSSC)&lt;br&gt;&lt;small&gt;Christoph Janott; Anton Batliner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.35-10.45 - Description of the HOMEBANK CHILD\/ADULT ADDRESSEE CORPUS (HB-CHAAC)&lt;br&gt;&lt;small&gt;Elika Bergelson; Andrei Amatuni; Marisa  Casillas; Amanda Seidl; Melanie Soderstorm; Anne Warlaumont  &lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.45-11.00 - It sounds like you have a cold! 
Testing voice features for the Interspeech 2017 Computational Paralinguistics Cold Challenge&lt;br&gt;&lt;small&gt;Mark Huckvale; Andr\u00e1s Beke&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.15 - End-to-End Deep Learning Framework for Speech Paralinguistics Detection Based on Perception Aware Spectrum&lt;br&gt;&lt;small&gt;Danwei Cai; Zhidong Ni; Wenbo Liu; Weicheng Cai; Gang Li; Ming Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.15-11.30 - Infected Phonemes: How a Cold Impairs Speech on a Phonetic Level&lt;br&gt;&lt;small&gt;Johannes Wagner; Thiago Fraga-Silva; Yvan Josse; Dominik Schiller; Andreas Seiderer; Elisabeth Andr\u00e9&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.30-11.45 - Phoneme state posteriorgram features for speech based automatic classification of speakers in cold and healthy conditions&lt;br&gt;&lt;small&gt;Akshay Kalkunte Suresh; Srinivasa Raghavan K M; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.45-12.00 - An Integrated Solution for Snoring Sound Classification Using Bhattacharyya Distance based GMM Supervectors with SVM, Feature Selection with Random Forest and Spectrogram with CNN&lt;br&gt;&lt;small&gt;Tin Lay Nwe; Tran Huy Dat; Ng Wen Zheng Terence; Bin Ma&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: Interspeech 2017 Computational Paralinguistics ChallengE (ComParE) 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 
August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4282\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4282\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech Synthesis: Data, Evaluation, and Novel Paradigms\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Poster 4\"\n                data-room-id=\"1068\"\n                data-room-name=\"Poster 4\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;S\u00e9bastien Le Maguer&lt;br&gt;&lt;br&gt;10.00-12.00 - A Neural Parametric Singing Synthesizer&lt;br&gt;&lt;small&gt;Merlijn Blaauw; Jordi Bonada&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - An Expanded Taxonomy of Semiotic Classes for Text Normalization&lt;br&gt;&lt;small&gt;Daan van Esch; Richard Sproat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Bias and Statistical Significance in Evaluating Speech Synthesis with Mean Opinion Scores&lt;br&gt;&lt;small&gt;Andrew Rosenberg; Bhuvana Ramabhadran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Complex-valued restricted Boltzmann machine for direct learning of frequency spectra&lt;br&gt;&lt;small&gt;Toru Nakashika; Shinji Takaki; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Evaluation of a Silent Speech Interface based on Magnetic Sensing and Deep Learning for a Phonetically Rich Vocabulary&lt;br&gt;&lt;small&gt;Jose A. Gonzalez; Lam A. Cheah; Phil D. Green; James M. Gilbert; Stephen R. 
Ell; Roger Moore; Ed Holdsworth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Phase Modeling using Integrated Linear Prediction Residual for Statistical Parametric Speech Synthesis.&lt;br&gt;&lt;small&gt;Nagaraj Adiga; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Predicting Head Pose from Speech with a Conditional Variational Autoencoder&lt;br&gt;&lt;small&gt;David Greenwood; Stephen Laycock; Iain Matthews&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Principles for learning controllable TTS from annotated and latent variation&lt;br&gt;&lt;small&gt;Gustav Eje Henter; Jaime Lorenzo-Trueba; Xin Wang; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Real-time reactive speech synthesis: incorporating interruptions&lt;br&gt;&lt;small&gt;Mirjam Wester; David Braude; Blaise Potard; Matthew Aylett; Francesca Shaw&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Sampling-based speech parameter generation using moment-matching networks&lt;br&gt;&lt;small&gt;Shinnosuke Takamichi; Tomoki Koriyama; Hiroshi Saruwatari&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Siri On-Device Deep Learning-Guided Unit Selection Text-to-Speech System&lt;br&gt;&lt;small&gt;Tim Capes; Paul Coles; Alistair Conkie; Ladan Golipour; Abie Hadjitarkhani; Qiong Hu; Nancy Huddleston; Melvyn Hunt; Jiangchuan Li; Matthias Neeracher; Kishore Prahallad; Tuomo Raitio; Ramya Rasipuram; Greg Townsend; Becci Williamson; David Winarsky; Zhizheng Wu; Hepeng Zhang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Tacotron: Towards End-To-End Speech Synthesis&lt;br&gt;&lt;small&gt;Yuxuan Wang; RJ Skerry-Ryan; Daisy Stanton; Yonghui Wu; Ron Weiss; Navdeep Jaitly; Zongheng Yang; Ying Xiao; Zhifeng Chen; Samy Bengio; Quoc Le; Yannis Agiomyrgiannakis; Rob Clark; Rif A. 
Saurous&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Unit selection with Hierarchical Cascaded Long Short Term Memory Bidirectional Recurrent Neural Nets&lt;br&gt;&lt;small&gt;Vincent Pollet; Enrico Zovato; Sufian Irhimeh; Pier Batzu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Utterance Selection for Optimizing Intelligibility of TTS Voices Trained on ASR Data&lt;br&gt;&lt;small&gt;Erica Cooper; Xinyue Wang; Alison Chang; Yocheved Levitan; Julia Hirschberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Synthesis and Spoken Language Generation\"\n                data-category-ids=\"1059\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech Synthesis: Data, Evaluation, and Novel Paradigms<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4275\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4275\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Discriminative Training for ASR\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                
data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hagen Soltau; William  Hartmann&lt;br&gt;&lt;br&gt;10.00-10.20 - Multitask Learning with Low-Level Auxiliary Tasks for Encoder-Decoder Based Speech Recognition&lt;br&gt;&lt;small&gt;Shubham Toshniwal; Hao Tang; Liang Lu; Karen Livescu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Optimizing expected word error rate via sampling for speech recognition&lt;br&gt;&lt;small&gt;Matt Shannon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Annealed F-smoothing as a Mechanism to Speed up Neural Network Training&lt;br&gt;&lt;small&gt;Tara Sainath; Vijay Peddinti; Olivier Siohan; Arun Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Non-Uniform MCE Training of Deep Long Short-Term Memory Recurrent Neural Networks for Keyword Spotting&lt;br&gt;&lt;small&gt;Zhong Meng; Biing-Hwang (Fred) Juang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Exploiting Eigenposteriors for Semi-supervised Training of DNN Acoustic Models with Sequence Discrimination&lt;br&gt;&lt;small&gt;Pranay Dighe; Afsaneh Asaei; Herve Bourlard&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Discriminative Autoencoders for Acoustic Modeling&lt;br&gt;&lt;small&gt;Ming-Han Yang; Hung-Shin Lee; Yu-Ding Lu; Kuan-Yu Chen; Yu Tsao; Berlin Chen; Hsin-Min Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Discriminative Training for ASR<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               
<\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4288\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4288\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: State of the Art in Physics-based Voice Simulation\"\n                data-time=\"10:00-12:00\"\n                data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Sten Ternstr\u00f6m; Oriol Guasch&lt;br&gt;&lt;br&gt;10.00-10.20 - Acoustic analysis of detailed three-dimensional shape of the human nasal cavity and paranasal sinuses&lt;br&gt;&lt;small&gt;Tatsuya Kitamura; Hironori Takemoto; Hisanori Makinae; Tetsutaro Yamaguchi; Kotaro Maki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - A semi-polar grid strategy for the three-dimensional finite element simulation of vowel-vowel sequences&lt;br&gt;&lt;small&gt;Marc Arnela; Saeed Dabbaghchian; Oriol Guasch; Olov Engwall&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - A Fast Robust 1D Flow Model for a Self-Oscillating Coupled 2D FEM Vocal Fold Simulation&lt;br&gt;&lt;small&gt;Arvind Vasudevan; Victor Zappi; Peter Anderson; Sidney Fels&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Waveform patterns in pitch glides near a vocal tract resonance&lt;br&gt;&lt;small&gt;Tiina Murtola; Jarmo Malinen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - A unified numerical simulation 
of vowel production that comprises phonation and the emitted sound&lt;br&gt;&lt;small&gt;Niyazi Cem Degirmenci; Johan Jansson; Johan Hoffman; Marc Arnela; Patricia Sanchez-Martin; Oriol Guasch; Pr. Sten Ternstr\u00f6m&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Synthesis of VV Utterances from Muscle Activation to Sound with a 3D Model&lt;br&gt;&lt;small&gt;Saeed Dabbaghchian; Marc Arnela; Olov Engwall; Oriol Guasch&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Synthesis and Spoken Language Generation\"\n                data-category-ids=\"1059\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special Session: State of the Art in Physics-based Voice Simulation<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4281\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4281\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Styles, Varieties, Forensics and Tools\"\n                data-time=\"10:00-12:00\"\n                data-room=\"Poster 3\"\n                data-room-id=\"1069\"\n                data-room-name=\"Poster 3\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                
data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kiyoko Yoneyama&lt;br&gt;&lt;br&gt;10.00-12.00 - Automatic Labelling of Prosodic Prominence, Phrasing and Disfluencies in French Speech by Simulating the Perception of Nai\u0308ve and Expert Listeners&lt;br&gt;&lt;small&gt;George Christodoulides; Mathieu Avanzi; Anne Catherine Simon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Cross-linguistic Distinctions between Professional and Non-Professional Speaking Styles&lt;br&gt;&lt;small&gt;Plinio Barbosa; Sandra Madureira; Philippe Boula de Mare\u00fcil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Developing an Embosi (Bantu C25) Speech Variant Dictionary to Model Vowel Elision and Morpheme Deletion&lt;br&gt;&lt;small&gt;Jamison Cooper-Leavitt; Lori Lamel; Annie Rialland; Martine Adda-Decker; Gilles Adda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Don't Count on ASR to Transcribe for You: Breaking Bias with Two Crowds&lt;br&gt;&lt;small&gt;Michael Levit; Yan Huang; Shuangyu Chang; Yifan Gong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Effect of Language, Speaking Style and Speaker on Long-term F0 Estimation&lt;br&gt;&lt;small&gt;Pablo Arantes; Anders Eriksson; Suska Gutzeit&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Effects of training data variety in generating glottal pulses from acoustic features with DNNs&lt;br&gt;&lt;small&gt;Manu Airaksinen; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Electrophysiological correlates of familiar voice recognition&lt;br&gt;&lt;small&gt;Julien Plante-Hebert; Victor Boucher; Boutheina Jemel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Glottal source estimation from coded telephone speech using a deep neural network&lt;br&gt;&lt;small&gt;Narendra N P; Manu Airaksinen; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Mapping across feature spaces in forensic voice comparison: the contribution of auditory-based voice quality to (semi-)automatic system 
testing&lt;br&gt;&lt;small&gt;Vincent Hughes; Philip Harrison; Paul Foulkes; Peter French; Colleen Kavanagh; Eugenia San Segundo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Perception and production of word-final \/\u0281\/ in broadcast and spontaneous French&lt;br&gt;&lt;small&gt;Cedric Gendrot&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Polyglot and Speech Corpus Tools: a system for representing, integrating, and querying speech corpora&lt;br&gt;&lt;small&gt;Michael McAuliffe; Elias Stengel-Eskin; Michaela Socolof; Morgan Sonderegger&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Rd as a control parameter to explore affective correlates of the tense-lax continuum&lt;br&gt;&lt;small&gt;Andy Murphy; Irena Yanushevskaya; Ailbhe N\u00ed Chasaide; Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Stability of prosodic characteristics across age and gender groups&lt;br&gt;&lt;small&gt;Jan Vol\u00edn; Tereza Tykalova; Tom\u00e1\u0161 Bo\u0159il&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - The effects of real and placebo alcohol on deaffrication&lt;br&gt;&lt;small&gt;Urban Zihlmann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Towards Intelligent Crowdsourcing for Audio Data Annotation: Integrating Active Learning in the Real World&lt;br&gt;&lt;small&gt;Simone Hantke; Zixing Zhang; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Styles, Varieties, Forensics and Tools<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Poster 3<\/i>\n               <\/div>\n\n               <div 
class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4284\">\n\n              <div class='time-header' style='visibility: hidden;'>10:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4284\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 7\"\n                data-time=\"10:00-12:00\"\n                data-room=\"E306\"\n                data-room-id=\"1070\"\n                data-room-name=\"E306\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"10.00-12.00 - Remote articulation test system based on WebRTC&lt;br&gt;&lt;small&gt;Ikuyo Masuda-Katsuse&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Soundtracing for realtime speech adjustment to environmental conditions in 3D simulations&lt;br&gt;&lt;small&gt;Szymon Pa\u0142ka; Tomasz P\u0119dzim\u0105\u017c; Bartosz Ziolko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - The ModelTalker Project: A web-based voice banking pipeline for ALS\/MND patients&lt;br&gt;&lt;small&gt;H Timothy Bunnell; Jason Lilley; Kathleen McGrath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Visible Vowels: a Tool for the Visualization of Vowel Variation&lt;br&gt;&lt;small&gt;Wilbert Heeringa; Hans Van de Velde&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Vocal-tract Model with Static Articulators: Lips, Teeth, Tongue, and More&lt;br&gt;&lt;small&gt;Takayuki Arai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div 
class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 7<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E306<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"10:00\">10:00-12:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5254\">\n\n              <div class='time-header'>12:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5254\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Lunch\"\n                data-time=\"12:00-13:30\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Lunch<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"12:00\">12:00-13:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n   
       <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4285\">\n\n              <div class='time-header'>13:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"4285\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Open Doors Event\"\n                data-time=\"13:00-15:30\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\/abs\/4285.html\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;p&gt;&lt;strong&gt;Location:&lt;\/strong&gt;&amp;nbsp;Tobii Technology and Furhat Robotics&lt;\/p&gt;&lt;p&gt;ISCA-SAC is organising a company visit event on this year\u2019s INTERSPEECH. Students will be given the opportunity to visit the headquarters of Stockholm companies interested in speech communication. The companies will demonstrate their technologies and products and let students try out different equipment, following an open discussion and networking. This year the students will visit Furhat Robotics and Tobii Pro. 
The event will take place on Thursday, August 24 and aims to bring students and researchers together discussing potential collaboration or even possibly hiring opportunities.&lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Special event\"\n                data-category-ids=\"1064\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Open Doors Event<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:00\">13:00-15:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4286\">\n\n              <div class='time-header'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4286\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Special Session: Interspeech 2017 Computational Paralinguistics ChallengE (ComParE) 2\"\n                data-time=\"13:30-15:30\"\n                data-room=\"E10\"\n                data-room-id=\"1060\"\n                data-room-name=\"E10\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Bj\u00f6rn Schuller; Anton Batliner&lt;br&gt;&lt;br&gt;13.30-13.45 - A dual source-filter model of snore audio for snorer group classification&lt;br&gt;&lt;small&gt;Achuth Rao MV; Shivani Yadav; Prasanta 
Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.45-14.00 - An 'End-to-Evolution' Hybrid Approach for Snore Sound Classification&lt;br&gt;&lt;small&gt;Michael Freitag; Shahin Amiriparian; Nicholas Cummins; Maurice Gerczuk; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.00-14.15 - Snore Sound Classification Using Image-based Deep Spectrum Features&lt;br&gt;&lt;small&gt;Shahin Amiriparian; Maurice Gerczuk; Sandra Ottl; Nicholas Cummins; Michael Freitag; Sergey Pugachevskiy; Alice Baird; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.15-14.30 - Exploring Fusion Methods and Feature Space for the Classification of Paralinguistic Information&lt;br&gt;&lt;small&gt;David Tavarez; Xabier Sarasola; Agustin Alonso; Jon Sanchez; Luis Serrano; Eva Navas; Inma Hern\u00e1ez&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.45 - DNN-based Feature Extraction and Classifier Combination for Child-Directed Speech, Cold and Snoring Identification&lt;br&gt;&lt;small&gt;G\u00e1bor Gosztolya; R\u00f3bert Busa-Fekete; Tam\u00e1s Gr\u00f3sz; L\u00e1szl\u00f3 T\u00f3th&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.45-15.00 - Introducing Weighted Kernel Classifiers for Handling Imbalanced Paralinguistic Corpora: Snoring, Addressee and Cold&lt;br&gt;&lt;small&gt;Heysem Kaya; Alexey Karpov&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.00-15.15 - The INTERSPEECH 2017 Computational Paralinguistics Challenge: A Summary of Results&lt;br&gt;&lt;small&gt;Stefan Steidl  &lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.15-15.30 - Discussion&lt;br&gt;&lt;small&gt;Bj\u00f6rn Schuller; Anton Batliner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Paralinguistics in Speech and Language\"\n                data-category-ids=\"1052\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Special 
Session: Interspeech 2017 Computational Paralinguistics ChallengE (ComParE) 2<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E10<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4283\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4283\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Show & Tell 7\"\n                data-time=\"13:30-15:30\"\n                data-room=\"E306\"\n                data-room-id=\"1070\"\n                data-room-name=\"E306\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"13.30-15.30 - Remote articulation test system based on WebRTC&lt;br&gt;&lt;small&gt;Ikuyo Masuda-Katsuse&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Soundtracing for realtime speech adjustment to environmental conditions in 3D simulations&lt;br&gt;&lt;small&gt;Szymon Pa\u0142ka; Tomasz P\u0119dzim\u0105\u017c; Bartosz Ziolko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - The ModelTalker Project: A web-based voice banking pipeline for ALS\/MND patients&lt;br&gt;&lt;small&gt;H Timothy Bunnell; Jason Lilley; Kathleen McGrath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Visible Vowels: a Tool for the Visualization of Vowel Variation&lt;br&gt;&lt;small&gt;Wilbert Heeringa; Hans Van de Velde&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Vocal-tract Model with Static Articulators: Lips, Teeth, Tongue, and 
More&lt;br&gt;&lt;small&gt;Takayuki Arai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Show & Tell\"\n                data-category-ids=\"1063\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: #EEA2A2;\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Show & Tell 7<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">E306<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4273\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4273\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Multimodal Resources and Annotation\"\n                data-time=\"13:30-15:30\"\n                data-room=\"B4\"\n                data-room-id=\"1065\"\n                data-room-name=\"B4\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Stephanie Strassel; Febe De Wet&lt;br&gt;&lt;br&gt;13.30-13.50 - CALYOU: A Comparable Spoken Algerian Corpus Harvested from YouTube&lt;br&gt;&lt;small&gt;Karima Abidi; Mohamed amine Menacer; Kamel Smaili&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - PRAV: A Phonetically Rich Audio Visual Corpus&lt;br&gt;&lt;small&gt;Abhishek Avinash Narwekar; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - NTCD-TIMIT: A 
New Database and Baseline for Noise-robust Audio-visual Speech Recognition&lt;br&gt;&lt;small&gt;Ahmed Hussen Abdelaziz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - The Extended SPaRKy Restaurant Corpus: designing a corpus with variable information density&lt;br&gt;&lt;small&gt;David M. Howcroft; Dietrich Klakow; Vera Demberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Automatic Construction of the Finnish Parliament Speech Corpus&lt;br&gt;&lt;small&gt;Andr\u00e9 Mansikkaniemi; Peter Smit; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Building audio-visual phonetically annotated Arabic corpus for expressive text to speech&lt;br&gt;&lt;small&gt;Omnia Abdo; Sherif Abdou; Mervat Fashal&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n                data-category-ids=\"1053\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Multimodal Resources and Annotation<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">B4<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4272\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4272\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Robust Speaker Recognition\"\n                
data-time=\"13:30-15:30\"\n                data-room=\"A2\"\n                data-room-id=\"1064\"\n                data-room-name=\"A2\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;John Hansen; Tomi Kinnunen&lt;br&gt;&lt;br&gt;13.30-13.50 - CNN-based joint mapping of short and long utterance i-vectors for speaker verification using short utterances&lt;br&gt;&lt;small&gt;Jinxi Guo; Usha Nookala; Abeer Alwan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Curriculum Learning based Probabilistic Linear Discriminant Analysis for Noise Robust Speaker Recognition&lt;br&gt;&lt;small&gt;Shivesh Ranjan; Abhinav Misra; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - I-vector Transformation Using a Novel Discriminative Denoising Autoencoder for Noise-robust Speaker Recognition&lt;br&gt;&lt;small&gt;Shivangi Mahto; Hitoshi Yamamoto; Takafumi Koshinaka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Unsupervised Discriminative Training of PLDA for Domain Adaptation in Speaker Verification&lt;br&gt;&lt;small&gt;Qiongqiong Wang; Takafumi Koshinaka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Speaker Verification Under Adverse Conditions Using I-vector Adaptation and Neural Networks&lt;br&gt;&lt;small&gt;Md Jahangir Alam; Patrick Kenny; Gautam Bhattacharya; Marcel Kockmann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Improving Robustness of Speaker Recognition to New Conditions Using Unlabeled Data&lt;br&gt;&lt;small&gt;Diego Castan; Mitchell McLaren; Luciana Ferrer; Aaron Lawson; Alicia Lozano-Diez&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speaker and Language Identification\"\n                data-category-ids=\"1054\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" 
style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Robust Speaker Recognition<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">A2<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4271\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4271\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Speech and Audio Segmentation and Classification 1\"\n                data-time=\"13:30-15:30\"\n                data-room=\"F11\"\n                data-room-id=\"1059\"\n                data-room-name=\"F11\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mahadeva Prasanna; Tomoki Toda&lt;br&gt;&lt;br&gt;13.30-13.50 - Occupancy Detection in Commercial and Residential Environments Using Audio Signal&lt;br&gt;&lt;small&gt;Shabnam Ghaffarzadegan; Attila Reiss; Mirko Ruhs; Robert Duerichen; Zhe Feng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Data Augmentation, Missing Feature Mask and Kernel Classification for Through-The-Wall Acoustic Surveillance&lt;br&gt;&lt;small&gt;Tran-Huy Dat; Wen Zheng Terence Ng; Yi Ren Leng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Endpoint detection using grid long short-term memory network for streaming speech recognition&lt;br&gt;&lt;small&gt;Shuo-Yiin Chang; Bo Li; Tara 
Sainath; Gabor Simko; Carolina Parada&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Deep Learning Techniques in Tandem with Signal Processing Cues for Phonetic Segmentation for Text to Speech Synthesis in Indian Languages&lt;br&gt;&lt;small&gt;Arun Baby; Jeena Prakash; Rupak Vignesh; Hema Murthy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Gate Activation Signal Analysis for Gated Recurrent Neural Networks and Its Correlation with Phoneme Boundaries&lt;br&gt;&lt;small&gt;Yu-Hsuan Wang; Cheng-Tao Chung; Hung-yi Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Speaker Change Detection in Broadcast TV using Bidirectional Long Short-Term Memory Networks&lt;br&gt;&lt;small&gt;Ruiqing Yin; Herv\u00e9 Bredin; Claude Barras&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Analysis of Speech and Audio Signals\"\n                data-category-ids=\"1062\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Speech and Audio Segmentation and Classification 1<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">F11<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4274\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4274\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Forensic Phonetics and Sociophonetic Varieties\"\n                
data-time=\"13:30-15:30\"\n                data-room=\"D8\"\n                data-room-id=\"1062\"\n                data-room-name=\"D8\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Agustin Gravano; Melanie Weirich&lt;br&gt;&lt;br&gt;13.30-13.50 - What is the relevant population? Considerations for the computation of likelihood ratios in forensic voice comparison&lt;br&gt;&lt;small&gt;Vincent Hughes; Paul Foulkes&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Voice disguise vs. Impersonation: Acoustic and perceptual measurements of vocal flexibility in non experts&lt;br&gt;&lt;small&gt;Veronique Delvaux; Lise Caucheteux; Kathy Huet; Myriam Piccaluga; Bernard Harmegnies&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Schwa Realization in French: Using Automatic Speech Processing to Study Phonological and Socio-linguistic Factors in Large Corpora&lt;br&gt;&lt;small&gt;Yaru WU; Martine Adda-Decker; Cecile Fougeron; Lori Lamel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - The Social Life of Tswana Ejectives&lt;br&gt;&lt;small&gt;Daniel Duran; Jagoda Bruni; Grzegorz Dogil; Justus Roux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - How long is too long? How pause features after requests affect the perceived willingness of affirmative answers&lt;br&gt;&lt;small&gt;Lea S. 
Kohtz; Oliver Niebuhr&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Shadowing Synthesized Speech \u2013 Segmental Analysis of Phonetic Convergence&lt;br&gt;&lt;small&gt;Iona Gessinger; Eran Raveh; S\u00e9bastien Le Maguer; Bernd M\u00f6bius; Ingmar Steiner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Phonetics, Phonology, and Prosody\"\n                data-category-ids=\"1056\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Forensic Phonetics and Sociophonetic Varieties<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">D8<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"4270\">\n\n              <div class='time-header' style='visibility: hidden;'>13:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"4270\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Neural Network Acoustic Models for ASR 3\"\n                data-time=\"13:30-15:30\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Bhuvana Ramabhadran; Rohit Prabhavalkar&lt;br&gt;&lt;br&gt;13.30-13.50 - Deep Neural Factorization for Speech 
Recognition&lt;br&gt;&lt;small&gt;Jen-Tzung Chien; Chen Shen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Semi-supervised DNN training with word selection for ASR&lt;br&gt;&lt;small&gt;Karel Vesely; Lukas Burget; Jan \u010cernock\u00fd&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Gaussian Prediction based Attention for Online End-to-End Speech Recognition&lt;br&gt;&lt;small&gt;Junfeng Hou; ShiLiang Zhang; Lirong Dai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Efficient knowledge distillation from an ensemble of teachers&lt;br&gt;&lt;small&gt;Takashi Fukuda; Masayuki Suzuki; Gakuto Kurata; Samuel Thomas; Jia Cui; Bhuvana Ramabhadran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - An Analysis of &quot;Attention&quot; in Sequence-to-Sequence Models&lt;br&gt;&lt;small&gt;Rohit Prabhavalkar; Tara Sainath; Bo Li; Kanishka Rao; Navdeep Jaitly&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Neural Speech Recognizer: Acoustic-to-Word LSTM Model for Large Vocabulary Speech Recognition&lt;br&gt;&lt;small&gt;Hagen Soltau; Hank Liao; Hasim Sak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n                data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n                data-category-ids=\"1058\"\n                data-span-all=\"\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Neural Network Acoustic Models for ASR 3<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"13:30\">13:30-15:30 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div 
class=\"box-wrapper\"\n              data-id=\"5260\">\n\n              <div class='time-header'>15:30<\/div>\n\n              <div class=\"box\"\n                data-id=\"5260\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Refreshments\"\n                data-time=\"15:30-16:00\"\n                data-room=\"Various locations\"\n                data-room-id=\"1079\"\n                data-room-name=\"Various locations\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n                data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Refreshments<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Various locations<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"15:30\">15:30-16:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n          <div class=\"box-wrapper\"\n              data-id=\"5263\">\n\n              <div class='time-header'>16:00<\/div>\n\n              <div class=\"box\"\n                data-id=\"5263\"\n                data-project=\"project_242_2017_01_12\"\n                data-title=\"Closing session\"\n                data-time=\"16:00-17:00\"\n                data-room=\"Aula Magna\"\n                data-room-id=\"1063\"\n                data-room-name=\"Aula Magna\"\n                data-day=\"4\"\n                data-abs-nbr=\"\"\n      
          data-abs-path=\"\"\n                data-speaker=\"\"\n                data-speakercell=\"\"\n                data-info=\"\n                 &lt;p&gt;The session will also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;br&gt;&lt;\/p&gt;\n                &lt;br&gt;&lt;br&gt;\"\n                data-category=\"Misc\"\n                data-category-ids=\"1068\"\n                data-span-all=\"1\"\n              >\n\n              \n\n              <div class=\"vertical_bar\" style=\"background-color: rgba(255, 255, 255, 0);\"><\/div>\n              <div class=\"box_inner\">\n                                <span>Closing session<\/span>\n               <br>\n               \n               <div class=\"room_div \">\n                 <i class=\"list-room\">Aula Magna<\/i>\n               <\/div>\n\n               <div class=\"name_time_div\">\n                <i class=\"list-time\" data-time=\"16:00\">16:00-17:00 - Thursday 24 August<\/i>\n               <\/div>\n              <\/div>\n             <\/div>\n          <\/div>\n\n          \n      <\/div>\n\n      <div id=\"scroll-down-for-more\">\n          <i class=\"fa fa-3x fa-chevron-down\" aria-hidden=\"true\"><\/i>\n          <br\/>\n          <span>Scroll down for more<\/span>\n      <\/div>\n\n    <\/div>\n\n    <div id=\"graphical_day_chooser_div\" class=\"row\" style=\"margin-right: 0px; margin-left: 0px; \">\n\n      \n      <div class=\"col-md-10\" style=\"padding-left: 0px;\">\n       <div class=\"btn-group\" id=\"days-group\">\n\n          <button type=\"button\" class=\"btn btn-default btn_day\" data-day=\"0\" value=\"date_select_0\"\n                  style=\"float: none !important;\"><span class=\"hidden-xs\">Sun<\/span><br class=\"hidden-xs\"><span>Aug 20<\/span><\/button><button type=\"button\" class=\"btn btn-default btn_day\" data-day=\"1\" value=\"date_select_1\"\n                  style=\"float: none !important;\"><span class=\"hidden-xs\">Mon<\/span><br 
class=\"hidden-xs\"><span>Aug 21<\/span><\/button><button type=\"button\" class=\"btn btn-default btn_day\" data-day=\"2\" value=\"date_select_2\"\n                  style=\"float: none !important;\"><span class=\"hidden-xs\">Tue<\/span><br class=\"hidden-xs\"><span>Aug 22<\/span><\/button><button type=\"button\" class=\"btn btn-default btn_day\" data-day=\"3\" value=\"date_select_3\"\n                  style=\"float: none !important;\"><span class=\"hidden-xs\">Wed<\/span><br class=\"hidden-xs\"><span>Aug 23<\/span><\/button><button type=\"button\" class=\"btn btn-default btn_day\" data-day=\"4\" value=\"date_select_4\"\n                  style=\"float: none !important;\"><span class=\"hidden-xs\">Thu<\/span><br class=\"hidden-xs\"><span>Aug 24<\/span><\/button>\n        <\/div>\n      <\/div>\n    <\/div>\n\n    <div id='scroll-right-container' style='display: none; cursor: pointer; margin-top: 73px; height: 70px; width: 75px; background-color: rgba(130, 130, 130, 0.56); position: relative; float: right; z-index: 10000000; text-align: center;'>\n        <i class=\"fa fa-3x fa-chevron-right\" aria-hidden=\"true\" style=\"color: white; position: absolute; left: 35%; top: 20%;\"><\/i>\n    <\/div>\n\n    <div id='scroll-left-container' style='display: none; cursor: pointer; margin-top: 73px; height: 70px; width: 75px; background-color: rgba(130, 130, 130, 0.56); position: relative; float: left; margin-left: 60px; z-index: 10000000; text-align: center;'>\n        <i class=\"fa fa-3x fa-chevron-left\" aria-hidden=\"true\" style=\"color: white; position: absolute; left: 30%; top: 20%;\"><\/i>\n    <\/div>\n\n    <div id=\"calendar_container\" class=\"dragscroll\" style=\"cursor: move; height: 1000px; background-color: #30332e;px; \">\n\n\n      \n  \t\t<!-- top bar with date and rooms name -->\n  \t\t<div class=\"xaxis xaxis-extra fixed top\">\n  \t      <div id=\"top_left_room_box\" class=\"preroom0 preroom0-extra\">\n            <div class=\"room0\">\n                
<select style=\"visibility: hidden;\"><\/select>\n    \t      <\/div>\n          <\/div>\n\n  \t      <div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1064 id='preroom_id_1064'><div class='room room-extra' style='width: 140px;' data-id=0>A2<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1063 id='preroom_id_1063'><div class='room room-extra' style='width: 140px;' data-id=1>Aula Magna<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1072 id='preroom_id_1072'><div class='room room-extra' style='width: 140px;' data-id=2>B3<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1065 id='preroom_id_1065'><div class='room room-extra' style='width: 140px;' data-id=3>B4<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1075 id='preroom_id_1075'><div class='room room-extra' style='width: 140px;' data-id=4>B5<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1066 id='preroom_id_1066'><div class='room room-extra' style='width: 140px;' data-id=5>C6<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1076 id='preroom_id_1076'><div class='room room-extra' style='width: 140px;' data-id=6>C307<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1115 id='preroom_id_1115'><div class='room room-extra' style='width: 140px;' data-id=7>C389<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1077 id='preroom_id_1077'><div class='room room-extra' style='width: 140px;' data-id=8>C397<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1062 id='preroom_id_1062'><div class='room room-extra' style='width: 140px;' data-id=9>D8<\/div>\n<\/div><div class='room_holder room_holder-extra' 
style='width: 150px;' data-room-id=1060 id='preroom_id_1060'><div class='room room-extra' style='width: 140px;' data-id=10>E10<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1070 id='preroom_id_1070'><div class='room room-extra' style='width: 140px;' data-id=11>E306<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1071 id='preroom_id_1071'><div class='room room-extra' style='width: 140px;' data-id=12>E397<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1074 id='preroom_id_1074'><div class='room room-extra' style='width: 140px;' data-id=13>F0 (KTH)<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1059 id='preroom_id_1059'><div class='room room-extra' style='width: 140px;' data-id=14>F11<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1073 id='preroom_id_1073'><div class='room room-extra' style='width: 140px;' data-id=15>Fantum (KTH)<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1061 id='preroom_id_1061'><div class='room room-extra' style='width: 140px;' data-id=16>Poster 1<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1067 id='preroom_id_1067'><div class='room room-extra' style='width: 140px;' data-id=17>Poster 2<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1069 id='preroom_id_1069'><div class='room room-extra' style='width: 140px;' data-id=18>Poster 3<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1068 id='preroom_id_1068'><div class='room room-extra' style='width: 140px;' data-id=19>Poster 4<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1078 id='preroom_id_1078'><div class='room room-extra' style='width: 140px;' 
data-id=20>K\u00e4gelbanan, S\u00f6dra teatern<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1081 id='preroom_id_1081'><div class='room room-extra' style='width: 140px;' data-id=21>Tekniska Museet and Etnografiska museet<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1079 id='preroom_id_1079'><div class='room room-extra' style='width: 140px;' data-id=22>Various locations<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1112 id='preroom_id_1112'><div class='room room-extra' style='width: 140px;' data-id=23>S\u00f6dra Huset, House A<\/div>\n<\/div><div class='room_holder room_holder-extra' style='width: 150px;' data-room-id=1116 id='preroom_id_1116'><div class='room room-extra' style='width: 140px;' data-id=24>Stockholm City Hall and Teaterbaren<\/div>\n<\/div>\n      <\/div>\n            \n      <div class='vertical-schedule-line' data-room-id='1064' style='left: 210px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1063' style='left: 360px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1072' style='left: 510px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1065' style='left: 660px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1075' style='left: 810px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1066' style='left: 960px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1076' style='left: 1110px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1115' style='left: 1260px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1077' style='left: 1410px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1062' style='left: 1560px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1060' style='left: 
1710px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1070' style='left: 1860px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1071' style='left: 2010px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1074' style='left: 2160px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1059' style='left: 2310px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1073' style='left: 2460px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1061' style='left: 2610px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1067' style='left: 2760px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1069' style='left: 2910px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1068' style='left: 3060px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1078' style='left: 3210px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1081' style='left: 3360px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1079' style='left: 3510px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1112' style='left: 3660px; height: 100%;'><\/div><div class='vertical-schedule-line' data-room-id='1116' style='left: 3810px; height: 100%;'><\/div>\n      <div id='time_0' name='visible_0' data-day='0' class='yaxis major time_all time_0  data-time='0800' style='top: 61px; display: block;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>08:00\n<\/div><\/div><\/div><div id = 'time_0' name='visible_1' data-day='0' class='yaxis time_all time_0' data-time='0815' style='top: 81px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_2' data-day='0' class='yaxis time_all time_0' data-time='0830' style='top: 
101px; display: block;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>08:30\n<\/div><\/div><\/div><div id = 'time_0' name='visible_3' data-day='0' class='yaxis time_all time_0' data-time='0845' style='top: 121px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_4' data-day='0' class='yaxis major time_all time_0  data-time='0900' style='top: 141px; display: block;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>09:00\n<\/div><\/div><\/div><div id = 'time_0' name='visible_5' data-day='0' class='yaxis time_all time_0' data-time='0915' style='top: 161px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_6' data-day='0' class='yaxis time_all time_0' data-time='0930' style='top: 181px; display: block;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>09:30\n<\/div><\/div><\/div><div id = 'time_0' name='visible_7' data-day='0' class='yaxis time_all time_0' data-time='0945' style='top: 201px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_8' data-day='0' class='yaxis major time_all time_0  data-time='1000' style='top: 221px; display: block;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>10:00\n<\/div><\/div><\/div><div id = 'time_0' name='visible_9' data-day='0' class='yaxis time_all time_0' data-time='1015' style='top: 241px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_10' data-day='0' class='yaxis time_all time_0' data-time='1030' style='top: 261px; display: block;'>\n                <div class='yaxistime yaxistime-extra 
half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>10:30\n<\/div><\/div><\/div><div id = 'time_0' name='visible_11' data-day='0' class='yaxis time_all time_0' data-time='1045' style='top: 281px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_12' data-day='0' class='yaxis major time_all time_0  data-time='1100' style='top: 301px; display: block;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>11:00\n<\/div><\/div><\/div><div id = 'time_0' name='visible_13' data-day='0' class='yaxis time_all time_0' data-time='1115' style='top: 321px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_14' data-day='0' class='yaxis time_all time_0' data-time='1130' style='top: 341px; display: block;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>11:30\n<\/div><\/div><\/div><div id = 'time_0' name='visible_15' data-day='0' class='yaxis time_all time_0' data-time='1145' style='top: 361px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_16' data-day='0' class='yaxis major time_all time_0  data-time='1200' style='top: 381px; display: block;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>12:00\n<\/div><\/div><\/div><div id = 'time_0' name='visible_17' data-day='0' class='yaxis time_all time_0' data-time='1215' style='top: 401px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_18' data-day='0' class='yaxis time_all time_0' data-time='1230' style='top: 421px; display: block;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor 
yaxistext-minor-extra'>12:30\n<\/div><\/div><\/div><div id = 'time_0' name='visible_19' data-day='0' class='yaxis time_all time_0' data-time='1245' style='top: 441px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_20' data-day='0' class='yaxis major time_all time_0  data-time='1300' style='top: 461px; display: block;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>13:00\n<\/div><\/div><\/div><div id = 'time_0' name='visible_21' data-day='0' class='yaxis time_all time_0' data-time='1315' style='top: 481px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_22' data-day='0' class='yaxis time_all time_0' data-time='1330' style='top: 501px; display: block;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>13:30\n<\/div><\/div><\/div><div id = 'time_0' name='visible_23' data-day='0' class='yaxis time_all time_0' data-time='1345' style='top: 521px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_24' data-day='0' class='yaxis major time_all time_0  data-time='1400' style='top: 541px; display: block;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>14:00\n<\/div><\/div><\/div><div id = 'time_0' name='visible_25' data-day='0' class='yaxis time_all time_0' data-time='1415' style='top: 561px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_26' data-day='0' class='yaxis time_all time_0' data-time='1430' style='top: 581px; display: block;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>14:30\n<\/div><\/div><\/div><div id = 'time_0' 
name='visible_27' data-day='0' class='yaxis time_all time_0' data-time='1445' style='top: 601px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_28' data-day='0' class='yaxis major time_all time_0  data-time='1500' style='top: 621px; display: block;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>15:00\n<\/div><\/div><\/div><div id = 'time_0' name='visible_29' data-day='0' class='yaxis time_all time_0' data-time='1515' style='top: 641px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_30' data-day='0' class='yaxis time_all time_0' data-time='1530' style='top: 661px; display: block;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>15:30\n<\/div><\/div><\/div><div id = 'time_0' name='visible_31' data-day='0' class='yaxis time_all time_0' data-time='1545' style='top: 681px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_32' data-day='0' class='yaxis major time_all time_0  data-time='1600' style='top: 701px; display: block;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>16:00\n<\/div><\/div><\/div><div id = 'time_0' name='visible_33' data-day='0' class='yaxis time_all time_0' data-time='1615' style='top: 721px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_0' name='visible_34' data-day='0' class='yaxis time_all time_0' data-time='1630' style='top: 741px; display: block;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>16:30\n<\/div><\/div><\/div><div id = 'time_0' name='visible_35' data-day='0' class='yaxis time_all time_0' data-time='1645' 
style='top: 761px; display: block;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_36' data-day='1' class='yaxis major time_all time_1  data-time='0800' style='top: 61px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>08:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_37' data-day='1' class='yaxis time_all time_1' data-time='0815' style='top: 81px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_38' data-day='1' class='yaxis time_all time_1' data-time='0830' style='top: 101px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>08:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_39' data-day='1' class='yaxis time_all time_1' data-time='0845' style='top: 121px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_40' data-day='1' class='yaxis major time_all time_1  data-time='0900' style='top: 141px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>09:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_41' data-day='1' class='yaxis time_all time_1' data-time='0915' style='top: 161px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_42' data-day='1' class='yaxis time_all time_1' data-time='0930' style='top: 181px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>09:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_43' data-day='1' class='yaxis time_all time_1' data-time='0945' style='top: 201px; display: none;'>\n                <div class='yaxistime 
yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_44' data-day='1' class='yaxis major time_all time_1  data-time='1000' style='top: 221px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>10:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_45' data-day='1' class='yaxis time_all time_1' data-time='1015' style='top: 241px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_46' data-day='1' class='yaxis time_all time_1' data-time='1030' style='top: 261px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>10:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_47' data-day='1' class='yaxis time_all time_1' data-time='1045' style='top: 281px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_48' data-day='1' class='yaxis major time_all time_1  data-time='1100' style='top: 301px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>11:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_49' data-day='1' class='yaxis time_all time_1' data-time='1115' style='top: 321px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_50' data-day='1' class='yaxis time_all time_1' data-time='1130' style='top: 341px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>11:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_51' data-day='1' class='yaxis time_all time_1' data-time='1145' style='top: 361px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_52' data-day='1' 
class='yaxis major time_all time_1  data-time='1200' style='top: 381px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>12:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_53' data-day='1' class='yaxis time_all time_1' data-time='1215' style='top: 401px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_54' data-day='1' class='yaxis time_all time_1' data-time='1230' style='top: 421px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>12:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_55' data-day='1' class='yaxis time_all time_1' data-time='1245' style='top: 441px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_56' data-day='1' class='yaxis major time_all time_1  data-time='1300' style='top: 461px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>13:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_57' data-day='1' class='yaxis time_all time_1' data-time='1315' style='top: 481px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_58' data-day='1' class='yaxis time_all time_1' data-time='1330' style='top: 501px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>13:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_59' data-day='1' class='yaxis time_all time_1' data-time='1345' style='top: 521px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_60' data-day='1' class='yaxis major time_all time_1  data-time='1400' style='top: 541px; display: none;'>\n  
              <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>14:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_61' data-day='1' class='yaxis time_all time_1' data-time='1415' style='top: 561px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_62' data-day='1' class='yaxis time_all time_1' data-time='1430' style='top: 581px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>14:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_63' data-day='1' class='yaxis time_all time_1' data-time='1445' style='top: 601px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_64' data-day='1' class='yaxis major time_all time_1  data-time='1500' style='top: 621px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>15:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_65' data-day='1' class='yaxis time_all time_1' data-time='1515' style='top: 641px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_66' data-day='1' class='yaxis time_all time_1' data-time='1530' style='top: 661px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>15:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_67' data-day='1' class='yaxis time_all time_1' data-time='1545' style='top: 681px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_68' data-day='1' class='yaxis major time_all time_1  data-time='1600' style='top: 701px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext 
yaxistext-extra'>16:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_69' data-day='1' class='yaxis time_all time_1' data-time='1615' style='top: 721px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_70' data-day='1' class='yaxis time_all time_1' data-time='1630' style='top: 741px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>16:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_71' data-day='1' class='yaxis time_all time_1' data-time='1645' style='top: 761px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_72' data-day='1' class='yaxis major time_all time_1  data-time='1700' style='top: 781px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>17:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_73' data-day='1' class='yaxis time_all time_1' data-time='1715' style='top: 801px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_74' data-day='1' class='yaxis time_all time_1' data-time='1730' style='top: 821px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>17:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_75' data-day='1' class='yaxis time_all time_1' data-time='1745' style='top: 841px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_76' data-day='1' class='yaxis major time_all time_1  data-time='1800' style='top: 861px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>18:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_77' 
data-day='1' class='yaxis time_all time_1' data-time='1815' style='top: 881px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_78' data-day='1' class='yaxis time_all time_1' data-time='1830' style='top: 901px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>18:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_79' data-day='1' class='yaxis time_all time_1' data-time='1845' style='top: 921px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_80' data-day='1' class='yaxis major time_all time_1  data-time='1900' style='top: 941px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>19:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_81' data-day='1' class='yaxis time_all time_1' data-time='1915' style='top: 961px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_82' data-day='1' class='yaxis time_all time_1' data-time='1930' style='top: 981px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>19:30\n<\/div><\/div><\/div><div id = 'time_1' name='visible_83' data-day='1' class='yaxis time_all time_1' data-time='1945' style='top: 1001px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_1' name='visible_84' data-day='1' class='yaxis major time_all time_1  data-time='2000' style='top: 1021px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>20:00\n<\/div><\/div><\/div><div id = 'time_1' name='visible_85' data-day='1' class='yaxis time_all time_1' data-time='2015' style='top: 1041px; display: 
none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_86' data-day='2' class='yaxis major time_all time_2  data-time='0700' style='top: 61px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>07:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_87' data-day='2' class='yaxis time_all time_2' data-time='0715' style='top: 81px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_88' data-day='2' class='yaxis time_all time_2' data-time='0730' style='top: 101px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>07:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_89' data-day='2' class='yaxis time_all time_2' data-time='0745' style='top: 121px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_90' data-day='2' class='yaxis major time_all time_2  data-time='0800' style='top: 141px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>08:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_91' data-day='2' class='yaxis time_all time_2' data-time='0815' style='top: 161px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_92' data-day='2' class='yaxis time_all time_2' data-time='0830' style='top: 181px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>08:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_93' data-day='2' class='yaxis time_all time_2' data-time='0845' style='top: 201px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div 
id='time_2' name='visible_94' data-day='2' class='yaxis major time_all time_2  data-time='0900' style='top: 221px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>09:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_95' data-day='2' class='yaxis time_all time_2' data-time='0915' style='top: 241px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_96' data-day='2' class='yaxis time_all time_2' data-time='0930' style='top: 261px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>09:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_97' data-day='2' class='yaxis time_all time_2' data-time='0945' style='top: 281px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_98' data-day='2' class='yaxis major time_all time_2  data-time='1000' style='top: 301px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>10:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_99' data-day='2' class='yaxis time_all time_2' data-time='1015' style='top: 321px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_100' data-day='2' class='yaxis time_all time_2' data-time='1030' style='top: 341px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>10:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_101' data-day='2' class='yaxis time_all time_2' data-time='1045' style='top: 361px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_102' data-day='2' class='yaxis major time_all time_2  
data-time='1100' style='top: 381px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>11:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_103' data-day='2' class='yaxis time_all time_2' data-time='1115' style='top: 401px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_104' data-day='2' class='yaxis time_all time_2' data-time='1130' style='top: 421px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>11:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_105' data-day='2' class='yaxis time_all time_2' data-time='1145' style='top: 441px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_106' data-day='2' class='yaxis major time_all time_2  data-time='1200' style='top: 461px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>12:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_107' data-day='2' class='yaxis time_all time_2' data-time='1215' style='top: 481px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_108' data-day='2' class='yaxis time_all time_2' data-time='1230' style='top: 501px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>12:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_109' data-day='2' class='yaxis time_all time_2' data-time='1245' style='top: 521px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_110' data-day='2' class='yaxis major time_all time_2  data-time='1300' style='top: 541px; display: none;'>\n                <div 
class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>13:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_111' data-day='2' class='yaxis time_all time_2' data-time='1315' style='top: 561px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_112' data-day='2' class='yaxis time_all time_2' data-time='1330' style='top: 581px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>13:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_113' data-day='2' class='yaxis time_all time_2' data-time='1345' style='top: 601px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_114' data-day='2' class='yaxis major time_all time_2  data-time='1400' style='top: 621px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>14:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_115' data-day='2' class='yaxis time_all time_2' data-time='1415' style='top: 641px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_116' data-day='2' class='yaxis time_all time_2' data-time='1430' style='top: 661px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>14:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_117' data-day='2' class='yaxis time_all time_2' data-time='1445' style='top: 681px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_118' data-day='2' class='yaxis major time_all time_2  data-time='1500' style='top: 701px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext 
yaxistext-extra'>15:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_119' data-day='2' class='yaxis time_all time_2' data-time='1515' style='top: 721px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_120' data-day='2' class='yaxis time_all time_2' data-time='1530' style='top: 741px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>15:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_121' data-day='2' class='yaxis time_all time_2' data-time='1545' style='top: 761px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_122' data-day='2' class='yaxis major time_all time_2  data-time='1600' style='top: 781px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>16:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_123' data-day='2' class='yaxis time_all time_2' data-time='1615' style='top: 801px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_124' data-day='2' class='yaxis time_all time_2' data-time='1630' style='top: 821px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>16:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_125' data-day='2' class='yaxis time_all time_2' data-time='1645' style='top: 841px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_126' data-day='2' class='yaxis major time_all time_2  data-time='1700' style='top: 861px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>17:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_127' 
data-day='2' class='yaxis time_all time_2' data-time='1715' style='top: 881px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_128' data-day='2' class='yaxis time_all time_2' data-time='1730' style='top: 901px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>17:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_129' data-day='2' class='yaxis time_all time_2' data-time='1745' style='top: 921px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_130' data-day='2' class='yaxis major time_all time_2  data-time='1800' style='top: 941px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>18:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_131' data-day='2' class='yaxis time_all time_2' data-time='1815' style='top: 961px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_132' data-day='2' class='yaxis time_all time_2' data-time='1830' style='top: 981px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>18:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_133' data-day='2' class='yaxis time_all time_2' data-time='1845' style='top: 1001px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_134' data-day='2' class='yaxis major time_all time_2  data-time='1900' style='top: 1021px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>19:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_135' data-day='2' class='yaxis time_all time_2' data-time='1915' style='top: 1041px; 
display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_136' data-day='2' class='yaxis time_all time_2' data-time='1930' style='top: 1061px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>19:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_137' data-day='2' class='yaxis time_all time_2' data-time='1945' style='top: 1081px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_138' data-day='2' class='yaxis major time_all time_2  data-time='2000' style='top: 1101px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>20:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_139' data-day='2' class='yaxis time_all time_2' data-time='2015' style='top: 1121px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_140' data-day='2' class='yaxis time_all time_2' data-time='2030' style='top: 1141px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>20:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_141' data-day='2' class='yaxis time_all time_2' data-time='2045' style='top: 1161px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_142' data-day='2' class='yaxis major time_all time_2  data-time='2100' style='top: 1181px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>21:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_143' data-day='2' class='yaxis time_all time_2' data-time='2115' style='top: 1201px; display: none;'>\n                <div class='yaxistime 
yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_144' data-day='2' class='yaxis time_all time_2' data-time='2130' style='top: 1221px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>21:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_145' data-day='2' class='yaxis time_all time_2' data-time='2145' style='top: 1241px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_146' data-day='2' class='yaxis major time_all time_2  data-time='2200' style='top: 1261px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>22:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_147' data-day='2' class='yaxis time_all time_2' data-time='2215' style='top: 1281px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_148' data-day='2' class='yaxis time_all time_2' data-time='2230' style='top: 1301px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>22:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_149' data-day='2' class='yaxis time_all time_2' data-time='2245' style='top: 1321px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_150' data-day='2' class='yaxis major time_all time_2  data-time='2300' style='top: 1341px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>23:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_151' data-day='2' class='yaxis time_all time_2' data-time='2315' style='top: 1361px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_152' 
data-day='2' class='yaxis time_all time_2' data-time='2330' style='top: 1381px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>23:30\n<\/div><\/div><\/div><div id = 'time_2' name='visible_153' data-day='2' class='yaxis time_all time_2' data-time='2345' style='top: 1401px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_2' name='visible_154' data-day='2' class='yaxis major time_all time_2  data-time='0000' style='top: 1421px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>00:00\n<\/div><\/div><\/div><div id = 'time_2' name='visible_155' data-day='2' class='yaxis time_all time_2' data-time='0015' style='top: 1441px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_156' data-day='3' class='yaxis major time_all time_3  data-time='0700' style='top: 61px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>07:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_157' data-day='3' class='yaxis time_all time_3' data-time='0715' style='top: 81px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_158' data-day='3' class='yaxis time_all time_3' data-time='0730' style='top: 101px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>07:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_159' data-day='3' class='yaxis time_all time_3' data-time='0745' style='top: 121px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_160' data-day='3' class='yaxis major time_all time_3  data-time='0800' style='top: 
141px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>08:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_161' data-day='3' class='yaxis time_all time_3' data-time='0815' style='top: 161px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_162' data-day='3' class='yaxis time_all time_3' data-time='0830' style='top: 181px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>08:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_163' data-day='3' class='yaxis time_all time_3' data-time='0845' style='top: 201px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_164' data-day='3' class='yaxis major time_all time_3  data-time='0900' style='top: 221px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>09:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_165' data-day='3' class='yaxis time_all time_3' data-time='0915' style='top: 241px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_166' data-day='3' class='yaxis time_all time_3' data-time='0930' style='top: 261px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>09:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_167' data-day='3' class='yaxis time_all time_3' data-time='0945' style='top: 281px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_168' data-day='3' class='yaxis major time_all time_3  data-time='1000' style='top: 301px; display: none;'>\n                <div class='yaxistime 
yaxistime-extra'><div class='yaxistext yaxistext-extra'>10:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_169' data-day='3' class='yaxis time_all time_3' data-time='1015' style='top: 321px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_170' data-day='3' class='yaxis time_all time_3' data-time='1030' style='top: 341px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>10:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_171' data-day='3' class='yaxis time_all time_3' data-time='1045' style='top: 361px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_172' data-day='3' class='yaxis major time_all time_3  data-time='1100' style='top: 381px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>11:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_173' data-day='3' class='yaxis time_all time_3' data-time='1115' style='top: 401px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_174' data-day='3' class='yaxis time_all time_3' data-time='1130' style='top: 421px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>11:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_175' data-day='3' class='yaxis time_all time_3' data-time='1145' style='top: 441px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_176' data-day='3' class='yaxis major time_all time_3  data-time='1200' style='top: 461px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext 
yaxistext-extra'>12:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_177' data-day='3' class='yaxis time_all time_3' data-time='1215' style='top: 481px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_178' data-day='3' class='yaxis time_all time_3' data-time='1230' style='top: 501px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>12:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_179' data-day='3' class='yaxis time_all time_3' data-time='1245' style='top: 521px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_180' data-day='3' class='yaxis major time_all time_3  data-time='1300' style='top: 541px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>13:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_181' data-day='3' class='yaxis time_all time_3' data-time='1315' style='top: 561px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_182' data-day='3' class='yaxis time_all time_3' data-time='1330' style='top: 581px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>13:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_183' data-day='3' class='yaxis time_all time_3' data-time='1345' style='top: 601px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_184' data-day='3' class='yaxis major time_all time_3  data-time='1400' style='top: 621px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>14:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_185' 
data-day='3' class='yaxis time_all time_3' data-time='1415' style='top: 641px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_186' data-day='3' class='yaxis time_all time_3' data-time='1430' style='top: 661px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>14:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_187' data-day='3' class='yaxis time_all time_3' data-time='1445' style='top: 681px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_188' data-day='3' class='yaxis major time_all time_3  data-time='1500' style='top: 701px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>15:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_189' data-day='3' class='yaxis time_all time_3' data-time='1515' style='top: 721px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_190' data-day='3' class='yaxis time_all time_3' data-time='1530' style='top: 741px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>15:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_191' data-day='3' class='yaxis time_all time_3' data-time='1545' style='top: 761px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_192' data-day='3' class='yaxis major time_all time_3  data-time='1600' style='top: 781px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>16:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_193' data-day='3' class='yaxis time_all time_3' data-time='1615' style='top: 801px; 
display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_194' data-day='3' class='yaxis time_all time_3' data-time='1630' style='top: 821px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>16:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_195' data-day='3' class='yaxis time_all time_3' data-time='1645' style='top: 841px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_196' data-day='3' class='yaxis major time_all time_3  data-time='1700' style='top: 861px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>17:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_197' data-day='3' class='yaxis time_all time_3' data-time='1715' style='top: 881px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_198' data-day='3' class='yaxis time_all time_3' data-time='1730' style='top: 901px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>17:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_199' data-day='3' class='yaxis time_all time_3' data-time='1745' style='top: 921px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_200' data-day='3' class='yaxis major time_all time_3  data-time='1800' style='top: 941px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>18:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_201' data-day='3' class='yaxis time_all time_3' data-time='1815' style='top: 961px; display: none;'>\n                <div class='yaxistime 
yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_202' data-day='3' class='yaxis time_all time_3' data-time='1830' style='top: 981px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>18:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_203' data-day='3' class='yaxis time_all time_3' data-time='1845' style='top: 1001px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_204' data-day='3' class='yaxis major time_all time_3  data-time='1900' style='top: 1021px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>19:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_205' data-day='3' class='yaxis time_all time_3' data-time='1915' style='top: 1041px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_206' data-day='3' class='yaxis time_all time_3' data-time='1930' style='top: 1061px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>19:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_207' data-day='3' class='yaxis time_all time_3' data-time='1945' style='top: 1081px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_208' data-day='3' class='yaxis major time_all time_3  data-time='2000' style='top: 1101px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>20:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_209' data-day='3' class='yaxis time_all time_3' data-time='2015' style='top: 1121px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_210' 
data-day='3' class='yaxis time_all time_3' data-time='2030' style='top: 1141px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>20:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_211' data-day='3' class='yaxis time_all time_3' data-time='2045' style='top: 1161px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_212' data-day='3' class='yaxis major time_all time_3  data-time='2100' style='top: 1181px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>21:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_213' data-day='3' class='yaxis time_all time_3' data-time='2115' style='top: 1201px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_214' data-day='3' class='yaxis time_all time_3' data-time='2130' style='top: 1221px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>21:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_215' data-day='3' class='yaxis time_all time_3' data-time='2145' style='top: 1241px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_216' data-day='3' class='yaxis major time_all time_3  data-time='2200' style='top: 1261px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>22:00\n<\/div><\/div><\/div><div id = 'time_3' name='visible_217' data-day='3' class='yaxis time_all time_3' data-time='2215' style='top: 1281px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_3' name='visible_218' data-day='3' class='yaxis time_all time_3' data-time='2230' style='top: 
1301px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>22:30\n<\/div><\/div><\/div><div id = 'time_3' name='visible_219' data-day='3' class='yaxis time_all time_3' data-time='2245' style='top: 1321px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_220' data-day='4' class='yaxis major time_all time_4  data-time='0700' style='top: 61px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>07:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_221' data-day='4' class='yaxis time_all time_4' data-time='0715' style='top: 81px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_222' data-day='4' class='yaxis time_all time_4' data-time='0730' style='top: 101px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>07:30\n<\/div><\/div><\/div><div id = 'time_4' name='visible_223' data-day='4' class='yaxis time_all time_4' data-time='0745' style='top: 121px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_224' data-day='4' class='yaxis major time_all time_4  data-time='0800' style='top: 141px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>08:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_225' data-day='4' class='yaxis time_all time_4' data-time='0815' style='top: 161px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_226' data-day='4' class='yaxis time_all time_4' data-time='0830' style='top: 181px; display: none;'>\n                <div class='yaxistime yaxistime-extra 
half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>08:30\n<\/div><\/div><\/div><div id = 'time_4' name='visible_227' data-day='4' class='yaxis time_all time_4' data-time='0845' style='top: 201px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_228' data-day='4' class='yaxis major time_all time_4  data-time='0900' style='top: 221px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>09:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_229' data-day='4' class='yaxis time_all time_4' data-time='0915' style='top: 241px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_230' data-day='4' class='yaxis time_all time_4' data-time='0930' style='top: 261px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>09:30\n<\/div><\/div><\/div><div id = 'time_4' name='visible_231' data-day='4' class='yaxis time_all time_4' data-time='0945' style='top: 281px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_232' data-day='4' class='yaxis major time_all time_4  data-time='1000' style='top: 301px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>10:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_233' data-day='4' class='yaxis time_all time_4' data-time='1015' style='top: 321px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_234' data-day='4' class='yaxis time_all time_4' data-time='1030' style='top: 341px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor 
yaxistext-minor-extra'>10:30\n<\/div><\/div><\/div><div id = 'time_4' name='visible_235' data-day='4' class='yaxis time_all time_4' data-time='1045' style='top: 361px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_236' data-day='4' class='yaxis major time_all time_4  data-time='1100' style='top: 381px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>11:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_237' data-day='4' class='yaxis time_all time_4' data-time='1115' style='top: 401px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_238' data-day='4' class='yaxis time_all time_4' data-time='1130' style='top: 421px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>11:30\n<\/div><\/div><\/div><div id = 'time_4' name='visible_239' data-day='4' class='yaxis time_all time_4' data-time='1145' style='top: 441px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_240' data-day='4' class='yaxis major time_all time_4  data-time='1200' style='top: 461px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>12:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_241' data-day='4' class='yaxis time_all time_4' data-time='1215' style='top: 481px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_242' data-day='4' class='yaxis time_all time_4' data-time='1230' style='top: 501px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>12:30\n<\/div><\/div><\/div><div id = 'time_4' 
name='visible_243' data-day='4' class='yaxis time_all time_4' data-time='1245' style='top: 521px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_244' data-day='4' class='yaxis major time_all time_4  data-time='1300' style='top: 541px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>13:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_245' data-day='4' class='yaxis time_all time_4' data-time='1315' style='top: 561px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_246' data-day='4' class='yaxis time_all time_4' data-time='1330' style='top: 581px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>13:30\n<\/div><\/div><\/div><div id = 'time_4' name='visible_247' data-day='4' class='yaxis time_all time_4' data-time='1345' style='top: 601px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_248' data-day='4' class='yaxis major time_all time_4  data-time='1400' style='top: 621px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>14:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_249' data-day='4' class='yaxis time_all time_4' data-time='1415' style='top: 641px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_250' data-day='4' class='yaxis time_all time_4' data-time='1430' style='top: 661px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>14:30\n<\/div><\/div><\/div><div id = 'time_4' name='visible_251' data-day='4' class='yaxis time_all time_4' data-time='1445' 
style='top: 681px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_252' data-day='4' class='yaxis major time_all time_4  data-time='1500' style='top: 701px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>15:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_253' data-day='4' class='yaxis time_all time_4' data-time='1515' style='top: 721px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_254' data-day='4' class='yaxis time_all time_4' data-time='1530' style='top: 741px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>15:30\n<\/div><\/div><\/div><div id = 'time_4' name='visible_255' data-day='4' class='yaxis time_all time_4' data-time='1545' style='top: 761px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_256' data-day='4' class='yaxis major time_all time_4  data-time='1600' style='top: 781px; display: none;'>\n                <div class='yaxistime yaxistime-extra'><div class='yaxistext yaxistext-extra'>16:00\n<\/div><\/div><\/div><div id = 'time_4' name='visible_257' data-day='4' class='yaxis time_all time_4' data-time='1615' style='top: 801px; display: none;'>\n                <div class='yaxistime yaxistime-extra'>&nbsp;<\/div><\/div><div id='time_4' name='visible_258' data-day='4' class='yaxis time_all time_4' data-time='1630' style='top: 821px; display: none;'>\n                <div class='yaxistime yaxistime-extra half-hour'><div class='yaxistext-minor yaxistext-minor-extra'>16:30\n<\/div><\/div><\/div><div id = 'time_4' name='visible_259' data-day='4' class='yaxis time_all time_4' data-time='1645' style='top: 841px; display: none;'>\n                <div class='yaxistime 
yaxistime-extra'>&nbsp;<\/div><\/div>\n          <div class=\"event \"\n            data-id=\"5243\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Registration\"\n            data-abs-nbr=\"\"\n            data-ystart=\"8\"\n            data-yend=\"17\"\n            data-time=\"08:00-17:00\"\n            data-room=\"23\"\n            data-room-id=\"1112\"\n            data-room-name=\"S\u00f6dra Huset, House A\"\n            data-day=\"0\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"\"\n            data-category-ids=\"\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Registration<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  08:00-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4262\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"TUTORIAL: Real-world Ambulatory Monitoring of Vocal Behavior\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9\"\n            data-yend=\"12.5\"\n            data-time=\"09:00-12:30\"\n            data-room=\"4\"\n            data-room-id=\"1075\"\n            data-room-name=\"B5\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4262.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;strong&gt;Organizer:&lt;\/strong&gt;&lt;br&gt;Daryush D. 
Mehta, Center for Laryngeal Surgery and Voice Rehabilitation, Massachusetts General Hospital&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Tutorial\"\n            data-category-ids=\"1065\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>TUTORIAL: Real-world Ambulatory Monitoring of Vocal Behavior<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  09:00-12:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#9264EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4261\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"TUTORIAL: Deep Learning for Dialogue Systems\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9\"\n            data-yend=\"12.5\"\n            data-time=\"09:00-12:30\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4261.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Yun-Nung Chen, National Taiwan University, Taipei, Taiwan&lt;br&gt;Asli Celikyilmazy, Microsoft Research, Redmond, WA&lt;br&gt;Dilek Hakkani-Tur, Google Research, Mountain View, CA&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Tutorial\"\n            data-category-ids=\"1065\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n         
                       <span>TUTORIAL: Deep Learning for Dialogue Systems<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  09:00-12:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#9264EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4260\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"TUTORIAL: Statistical Parametric Speech Processing: Solving Problems with the Model-based Approach\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9\"\n            data-yend=\"12.5\"\n            data-time=\"09:00-12:30\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4260.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Mads Gr\u00e6sb\u00f8ll Christensen, Aalborg University&lt;br&gt;Assistant Prof. Jesper Rindom Jensen, Aalborg University&lt;br&gt;Assistant Prof. 
Jesper Kj\u00e6r Nielsen, Aalborg University.&lt;br&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Tutorial\"\n            data-category-ids=\"1065\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>TUTORIAL: Statistical Parametric Speech Processing: Solving Problems with the Model-based Approach<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  09:00-12:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#9264EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4264\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"TUTORIAL: Creating Speech Databases of Less-Resourced Languages: A CLARIN Hands-On Tutorial\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9\"\n            data-yend=\"12.5\"\n            data-time=\"09:00-12:30\"\n            data-room=\"6\"\n            data-room-id=\"1076\"\n            data-room-name=\"C307\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4264.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Christoph Draxler, Institute of Phonetics and Speech Communication, Ludwig Maximilian University Munich, Germany&lt;br&gt;Florian Schiel, Institute of Phonetics and Speech Communication, Ludwig Maximilian University Munich, Germany&lt;br&gt;Thomas Kisler, Institute of Phonetics and Speech Communication, Ludwig Maximilian University Munich, Germany&lt;br&gt;&lt;br&gt;\"\n         
   data-category=\"Tutorial\"\n            data-category-ids=\"1065\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>TUTORIAL: Creating Speech Databases of Less-Resourced Languages: A CLARIN Hands-On Tutorial<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  09:00-12:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#9264EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4263\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"TUTORIAL: Insights from Qualitative Research: An Introduction to the Phonetics of Talk-In-Interaction\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9\"\n            data-yend=\"12.5\"\n            data-time=\"09:00-12:30\"\n            data-room=\"8\"\n            data-room-id=\"1077\"\n            data-room-name=\"C397\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4263.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Richard Ogden Department of Language &amp;amp; Linguistic Science, Centre for Advanced Studies in Language &amp;amp; Communication, University of York, UK.&lt;br&gt;Jan Gorisch Department of Pragmatics, Institute for the German Language (IDS), Mannheim, Germany.&lt;br&gt;Gareth Walker School of English, University of Sheffield, UK.&lt;br&gt;Meg Zellers Department of Linguistics: English, University of Stuttgart, Germany&lt;br&gt;&lt;br&gt;\"\n            
data-category=\"Tutorial\"\n            data-category-ids=\"1065\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>TUTORIAL: Insights from Qualitative Research: An Introduction to the Phonetics of Talk-In-Interaction<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  09:00-12:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#9264EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4258\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"The Second Workshop for Young Female Researchers in Speech Science &amp; Technology (YFRSW)\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"17\"\n            data-time=\"10:00-17:00\"\n            data-room=\"15\"\n            data-room-id=\"1073\"\n            data-room-name=\"Fantum (KTH)\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4258.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Special event\"\n            data-category-ids=\"1064\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>The Second Workshop for Young Female Researchers in Speech Science & Technology (YFRSW)<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n     
             10:00-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4259\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"3rd Doctoral Consortium\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"17\"\n            data-time=\"10:00-17:00\"\n            data-room=\"13\"\n            data-room-id=\"1074\"\n            data-room-name=\"F0 (KTH)\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4259.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Special event\"\n            data-category-ids=\"1064\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>3rd Doctoral Consortium<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4267\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"TUTORIAL: Latest Advances in Computational Speech and Audio Analysis: Big Data, Deep Learning, and Whatnots\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"17\"\n            data-time=\"13:30-17:00\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4267.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\n                 
&lt;strong&gt;Organizers:&lt;\/strong&gt;\n                 &lt;br&gt;Bj\u00f6rn W. Schuller, Imperial College London, U.K. &amp;amp; Univeristy of Passau, Germany &amp;amp; audEERING Gmbh, Germany\n                 &lt;br&gt;Nicholas Cummins, Univeristy of Passau, Germany\n                &lt;br&gt;&lt;br&gt;\"\n            data-category=\"Tutorial\"\n            data-category-ids=\"1065\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>TUTORIAL: Latest Advances in Computational Speech and Audio Analysis: Big Data, Deep Learning, and Whatnots<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#9264EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4268\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"TUTORIAL: Modeling Situated Multi-modal Interaction with the Furhat Robot Head\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"17\"\n            data-time=\"13:30-17:00\"\n            data-room=\"7\"\n            data-room-id=\"1115\"\n            data-room-name=\"C389\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4268.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Gabriel Skantze, KTH Speech Music and Hearing &amp;amp; Furhat Robotics, Sweden&lt;br&gt;Andr\u00e9 Pereira, Furhat Robotics, 
Sweden&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Tutorial\"\n            data-category-ids=\"1065\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>TUTORIAL: Modeling Situated Multi-modal Interaction with the Furhat Robot Head<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#9264EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4265\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"TUTORIAL: Computational Modeling of Language Acquisition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"17\"\n            data-time=\"13:30-17:00\"\n            data-room=\"4\"\n            data-room-id=\"1075\"\n            data-room-name=\"B5\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4265.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;br&gt;Naomi Feldman, University of Maryland, MD&lt;br&gt;Emmanuel Dupoux, Ecole des Hautes Etudes en Sciences Sociales, France&lt;br&gt;Okko R\u00e4s\u00e4nen, Aalto University, Finland&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Tutorial\"\n            data-category-ids=\"1065\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>TUTORIAL: Computational 
Modeling of Language Acquisition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#9264EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4266\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"TUTORIAL: Deep Learning for Text-to-Speech Synthesis, using the Merlin toolkit\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"17\"\n            data-time=\"13:30-17:00\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"0\"\n            data-abs-path=\"\/abs\/4266.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;p&gt;\n                 &lt;\/p&gt;&lt;p&gt;&lt;strong&gt;Organizers:&lt;\/strong&gt;&lt;\/p&gt;&lt;p&gt;&lt;\/p&gt;&lt;ul&gt;&lt;li&gt;Simon King, Centre for Speech Technology Research, University of Edinburgh, UK&lt;\/li&gt;&lt;li&gt;Oliver Watts, Centre for Speech Technology Research, University of Edinburgh, UK&lt;\/li&gt;&lt;li&gt;Srikanth Ronanki, Centre for Speech Technology Research, University of Edinburgh, UK&lt;\/li&gt;&lt;li&gt;Zhizheng Wu, Apple Inc, USA&lt;\/li&gt;&lt;li&gt;Felipe Espic, Centre for Speech Technology Research, University of Edinburgh, UK&lt;\/li&gt;&lt;\/ul&gt;&lt;p&gt;\n                &lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Tutorial\"\n            data-category-ids=\"1065\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n    
          <div class=\"title\">\n                                <span>TUTORIAL: Deep Learning for Text-to-Speech Synthesis, using the Merlin toolkit<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#9264EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5245\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Registration\"\n            data-abs-nbr=\"\"\n            data-ystart=\"8\"\n            data-yend=\"17\"\n            data-time=\"08:00-17:00\"\n            data-room=\"23\"\n            data-room-id=\"1112\"\n            data-room-name=\"S\u00f6dra Huset, House A\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"\"\n            data-category-ids=\"\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Registration<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  08:00-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5264\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Registration\"\n            data-abs-nbr=\"\"\n            data-ystart=\"8\"\n          
  data-yend=\"9\"\n            data-time=\"08:00-09:00\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"\"\n            data-category-ids=\"\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Registration<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  08:00-09:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5240\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Opening session\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9\"\n            data-yend=\"9.75\"\n            data-time=\"09:00-09:45\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;p&gt;The session will also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Opening session<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Aula Magna<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n  
                09:00-09:45                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4232\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"ISCA Medal 2017 Ceremony\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9.75\"\n            data-yend=\"10.25\"\n            data-time=\"09:45-10:15\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"1\"\n            data-abs-path=\"\/abs\/4232.html\"\n            data-speaker=\"Fumitada Itakura\"\n            data-speakercell=\"Fumitada Itakura\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Haizhou Li&lt;br&gt;&lt;br&gt;&lt;p&gt;The session will also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;br&gt;&lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Keynote\"\n            data-category-ids=\"1057\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>ISCA Medal 2017 Ceremony<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Aula Magna<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  09:45-10:15                <\/span><\/div>\n\n              <div class=\"lecturer\"><span>Fumitada Itakura<\/span><\/div>\n            <\/div>\n\n\n                                <div style=\"background-color:#72D9EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5256\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Refreshments\"\n            data-abs-nbr=\"\"\n   
         data-ystart=\"10.25\"\n            data-yend=\"11\"\n            data-time=\"10:15-11:00\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"\"\n            data-category-ids=\"\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Refreshments<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:15-11:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4237\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Acoustic and Articulatory Phonetics\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Marzena Zygis; \u0160tefan Be\u0148u\u0161&lt;br&gt;&lt;br&gt;11.00-11.20 - Phonetic Correlates of Pharyngeal and Pharyngealized Consonants in Saudi, Lebanese, and Jordanian Arabic: an rt-MRI Study&lt;br&gt;&lt;small&gt;Zainab Hermes; Marissa Barlaz; Ryan Shosted; Zhi-Pei Liang; Brad Sutton&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Glottal opening and strategies of production of fricatives&lt;br&gt;&lt;small&gt;Benjamin Elie; Yves 
Laprie&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Acoustics and articulation of medial versus final coronal stop gemination contrasts in Moroccan Arabic&lt;br&gt;&lt;small&gt;Mohamed Yassine Frej; Christopher Carignan; Catherine T. Best&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - How are four-level length distinctions produced? Evidence from Moroccan Arabic&lt;br&gt;&lt;small&gt;Giuseppina Turco; Karim Shoul; Rachid Ridouane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - Nature of contrast and coarticulation: Evidence from Mizo tones and Assamese vowel harmony&lt;br&gt;&lt;small&gt;Indranil Dutta; Irfan S.; Pamir Gogoi; Priyankoo Sarmah&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - CANCELLED: Vowels in the Barunga variety of North Australian Kriol&lt;br&gt;&lt;small&gt;Caroline Jones; Katherine Demuth; Weicong Li; Andre Almeida&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Acoustic and Articulatory Phonetics<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4256\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Interspeech 2017 Automatic Speaker Verification Spoofing and Countermeasures Challenge 1\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n         
   data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tomi Kinnunen; Junichi Yamagishi&lt;br&gt;&lt;br&gt;11.00-11.30 - The ASVspoof 2017 Challenge: Assessing the Limits of Replay Spoofing Attack Detection&lt;br&gt;&lt;small&gt;Tomi Kinnunen; Md Sahidullah; H\u00e9ctor Delgado; Massimiliano Todisco; Nicholas Evans; Junichi Yamagishi; Kong Aik Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.30-11.45 - Experimental analysis of features for replay attack detection-Results on the ASVspoof 2017 Challenge&lt;br&gt;&lt;small&gt;Roberto Javier Font Ruiz; Mar\u00eda Jos\u00e9 Cano Vicente; Juan Manuel Esp\u00edn L\u00f3pez&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.45-12.00 - Novel Variable Length Teager Energy Separation Based Instantaneous Frequency Features for Replay Detection&lt;br&gt;&lt;small&gt;Hemant Patil; Madhu Kamble; Tanvina Patel; Meet Soni&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.15 - Countermeasures for Automatic Speaker Verification Replay Spoofing Attack : On Data Augmentation, Feature Representation, Classification and Fusion&lt;br&gt;&lt;small&gt;Weicheng Cai; Danwei Cai; Wenbo Liu; Ming Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.15-12.30 - Spoof Detection Using Source, Instantaneous Frequency and Cepstral Features&lt;br&gt;&lt;small&gt;Sarfaraz Jelil; Rohan Kumar Das; S R Mahadeva Prasanna; Rohit Sinha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.30-12.45 - Audio Replay Attack Detection with High-Frequency Features&lt;br&gt;&lt;small&gt;Marcin Witkowski; Stanis\u0142aw Kacprzak; Piotr \u017belasko; Konrad Kowalczyk; Jakub Ga\u0142ka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.45-13.00 - Feature selection based on CQCCs for Automatic Speaker Verification spoofing&lt;br&gt;&lt;small&gt;Wang Xianliang; Xiao Yanhong; Zhu Xuan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n     
       data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Interspeech 2017 Automatic Speaker Verification Spoofing and Countermeasures Challenge 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4236\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Dereverberation, Echo Cancellation and Speech \"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Stephen Zahorian ; Bernd T. 
Meyer&lt;br&gt;&lt;br&gt;11.00-11.20 - Improving Speaker Verification for Reverberant Conditions with Deep Neural Network Dereverberation Processing&lt;br&gt;&lt;small&gt;Peter Guzewich; Stephen Zahorian&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Stepsize Control for Acoustic Feedback Cancellation Based on the Detection of Reverberant Signal Periods and the Estimated System Distance&lt;br&gt;&lt;small&gt;Philipp Bulling; Klaus Linhard; Arthur Wolf; Gerhard Schmidt&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - A Delay-Flexible Stereo Acoustic Echo Cancellation for DFT-Based In-Car Communication (ICC) Systems&lt;br&gt;&lt;small&gt;Jan Franzen; Tim Fingscheidt&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - Speech Enhancement Based on Harmonic Estimation combined with MMSE to Improve Speech Intelligibility for Cochlear Implant Recipients&lt;br&gt;&lt;small&gt;Dongmei Wang; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - Improving speech intelligibility in binaural hearing aids by estimating a time-frequency mask with a weighted least squares classi\ufb01er&lt;br&gt;&lt;small&gt;David Ayllon; Roberto Gil-Pita; Manuel Rosa-Zurera&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Simulations of high-frequency vocoder on Mandarin speech recognition for acoustic hearing preserved cochlear implant&lt;br&gt;&lt;small&gt;Tsung-Chen Wu; Tai-Shih Chi; Chia-Fone Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Coding and Enhancement\"\n            data-category-ids=\"1060\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Dereverberation, Echo Cancellation and Speech <\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n      
      <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4234\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Multimodal and Articulatory Synthesis\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Ingmar Steiner; Korin Richmond&lt;br&gt;&lt;br&gt;11.00-11.20 - The Influence of Synthetic Voice on the Evaluation of a Virtual Character&lt;br&gt;&lt;small&gt;Joao Cabral; Benjamin Cowan; Katja Zibrek; Rachel McDonnell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Articulatory Text-to-Speech Synthesis using the Digital Waveguide Mesh driven by a Deep Neural Network&lt;br&gt;&lt;small&gt;Amelia Gully; Takenori Yoshimura; Damian Murphy; Kei Hashimoto; Yoshihiko Nankaku; Keiichi Tokuda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - An HMM\/DNN comparison for synchronized text-to-speech and tongue motion synthesis&lt;br&gt;&lt;small&gt;S\u00e9bastien Le Maguer; Ingmar Steiner; Alexander Hewer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - VCV Synthesis using Task Dynamics to Animate a Factor-based Articulatory Model&lt;br&gt;&lt;small&gt;Rachel Alexander; Tanner Sorensen; Asterios Toutios; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - Beyond the Listening Test: An interactive approach to TTS Evaluation&lt;br&gt;&lt;small&gt;Joseph Mendelson; Matthew Aylett&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Integrating Articulatory Information into Deep Learning-Based Text-to-Speech Synthesis&lt;br&gt;&lt;small&gt;Beiming Cao; Myungjong Kim; Jan van Santen; Ted Mau; Jun 
Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Synthesis and Spoken Language Generation\"\n            data-category-ids=\"1059\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Multimodal and Articulatory Synthesis<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4250\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 1\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"11\"\n            data-room-id=\"1070\"\n            data-room-name=\"E306\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"11.00-13.00 - A system for real-time collaborative transcription correction&lt;br&gt;&lt;small&gt;Peter Bell; Joachim Fainberg; Catherine Lai; Mark Sinclair&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - ChunkitApp: Investigating the relevant units of online speech processing&lt;br&gt;&lt;small&gt;Svetlana Vetchinnikova; Anna Mauranen; Nina Mikusova&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Extending the EMU Speech Database Management System: Cloud Hosting, Team Collaboration, Automatic Revision Control&lt;br&gt;&lt;small&gt;Markus Jochim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - HomeBank: A repository for long-form real-world audio recordings of children&lt;br&gt;&lt;small&gt;Anne Warlaumont; Mark vanDam; Elika Bergelson; Alejandrina 
Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - MoPAReST - Mobile Phone Assisted Remote Speech Therapy Platform&lt;br&gt;&lt;small&gt;Chitralekha Bhat; Anjali Kant; Bhavik Vachhani; Sarita Rautara; Ashok Kumar Sinha; Sunil Kumar Kopparapu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Prosograph: A Tool for Prosody Visualisation of Large Speech Corpora&lt;br&gt;&lt;small&gt;Alp Oktem; Mireia Farr\u00fas; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4235\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Multimodal Paralinguistics\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Paula Lopez-Otero; Elizabeth Shriberg&lt;br&gt;&lt;br&gt;11.00-11.20 - Multimodal markers of persuasive speech : designing a Virtual Debate Coach&lt;br&gt;&lt;small&gt;Volha 
Petukhova; Manoj Raju; Harry Bunt&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Acoustic-Prosodic and Physiological Response to Stressful Interactions in Children with Autism Spectrum Disorder&lt;br&gt;&lt;small&gt;Daniel Bone; Julia Mertens; Emily Zane; Sungbok Lee; Shrikanth Narayanan; Ruth Grossman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - A Stepwise Analysis of Aggregated Crowdsourced Labels Describing Multimodal Emotional Behaviors&lt;br&gt;&lt;small&gt;Alec Burmania; Carlos Busso&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - An information theoretic analysis of the temporal synchrony between head gestures and prosodic patterns in spontaneous speech&lt;br&gt;&lt;small&gt;Gaurav Fotedar; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - Multimodal Prediction of Affect Dimensions Fusing Multiple Regression Techniques&lt;br&gt;&lt;small&gt;Dongyan Huang; Wan Ding; Mingyu Xu; Huaiping Ming; Xinguo Yu; Minghui Dong; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Co-production of speech and pointing gestures in clear and perturbed interactive tasks: multimodal designation strategies&lt;br&gt;&lt;small&gt;Marion Dohen; Benjamin Roustan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Multimodal Paralinguistics<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4251\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show 
&amp; Tell 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"12\"\n            data-room-id=\"1071\"\n            data-room-name=\"E397\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"11.00-13.00 - An apparatus to investigate western opera singing skill learning using performance and result biofeedback, and measuring its neural correlates&lt;br&gt;&lt;small&gt;Aurore Jaumard-Hakoun; Samy Chikhi; Takfarinas Medani; Angelika Nair; G\u00e9rard Dreyfus; Fran\u00e7ois-Beno\u00eet Vialatte&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Emojive! Collecting Emotion Data from Speech and Facial Expression using Mobile Game App&lt;br&gt;&lt;small&gt;Ji Ho Park; Nayeon Lee; Dario Bertero; Anik Dey; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Mylly - The Mill: A new platform for processing speech and text corpora easily and efficiently&lt;br&gt;&lt;small&gt;Mietta Lennes; Jussi Piitulainen; Martin Matthiesen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - PercyConfigurator -- Perception Experiments as a Service&lt;br&gt;&lt;small&gt;Christoph Draxler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - System for speech transcription and post-editing in Microsoft Word&lt;br&gt;&lt;small&gt;Askars Salimbajevs; Indra Ikauniece&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Visual Learning 2: Pronunciation app using ultrasound, video, and MRI&lt;br&gt;&lt;small&gt;Kyori Suzuki; Ian Wilson; Hayato Watanabe&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 2<\/span>\n              <\/div>\n             
 <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4244\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech and Audio Segmentation and Classification 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"17\"\n            data-room-id=\"1067\"\n            data-room-name=\"Poster 2\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hugo van Hamme&lt;br&gt;&lt;br&gt;11.00-13.00 - A robust Voiced\/Unvoiced phoneme classification from whispered speech using the \u201ccolor\u201d of whispered phonemes and Deep Neural Network&lt;br&gt;&lt;small&gt;Nisha Meenakshi; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Attention based CLDNNs for short-duration acoustic scene classification&lt;br&gt;&lt;small&gt;Jinxi Guo; Ning Xu; Li-Jia Li; Abeer Alwan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - AUDIO CLASSIFICATION USING CLASS-SPECIFIC LEARNED DESCRIPTORS&lt;br&gt;&lt;small&gt;Sukanya Sonowal; Tushar Sandhan; Inkyu Choi; Nam Soo Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Enhanced Feature Extraction for Speech Detection in Media Audio&lt;br&gt;&lt;small&gt;Inseon Jang; ChungHyun Ahn; Jeongil Seo; Younseon Jang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Frame-wise dynamic threshold based polyphonic acoustic event 
detection&lt;br&gt;&lt;small&gt;Xianjun Xia; Roberto Togneri; Ferdous Sohel; David Huang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Hidden Markov Model Variational Autoencoder for Acoustic Unit Discovery&lt;br&gt;&lt;small&gt;Janek Ebbers; Jahn Heymann; Lukas Drude; Thomas Glarner; Reinhold Haeb-Umbach; Bhiksha Raj&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Indoor\/Outdoor Audio Classification using Foreground Speech Segmentation&lt;br&gt;&lt;small&gt;Banriskhem K. Khonglah; Deepak K T; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Montreal Forced Aligner: trainable text-speech alignment using Kaldi&lt;br&gt;&lt;small&gt;Michael McAuliffe; Michaela Socolof; Sarah Mihuc; Michael Wagner; Morgan Sonderegger&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Multilingual I-Vector based Statistical Modeling for Music Genre Classification&lt;br&gt;&lt;small&gt;Jia Dai; Wei Xue; Wenju Liu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Virtual Adversarial Training and Data Augmentation for Acoustic Event Detection with Gated Recurrent Neural Networks&lt;br&gt;&lt;small&gt;Matthias Z\u00f6hrer; Franz Pernkopf&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Speech and Audio Signals\"\n            data-category-ids=\"1062\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech and Audio Segmentation and Classification 2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4255\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Speech 
Technology for Code-Switching in Multilingual Communities\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"14\"\n            data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kalika Bali; Alan W Black&lt;br&gt;&lt;br&gt; \n                 &lt;p&gt;&lt;\/p&gt; \n                 &lt;p&gt;See full description at: https:\/\/www.microsoft.com\/en-us\/research\/event\/interspeech-2017-special-session-speech-technologies-for-code-switching-in-multilingual-communities\/#&lt;\/p&gt;\n                 &lt;p&gt;Topics of interest for this special session will include but are not limited to:&amp;nbsp;&lt;br&gt;&lt;\/p&gt; \n                 &lt;li&gt;Speech Recognition of code-switched speech&lt;\/li&gt; \n                 &lt;li&gt;Language Modeling for code-switched speech&lt;\/li&gt; \n                 &lt;li&gt;Speech Synthesis of code-switched text&lt;\/li&gt; \n                 &lt;li&gt;Speech Translation of code-switched languages&lt;\/li&gt; \n                 &lt;li&gt;Spoken Dialogue Systems that can handle code-switching&lt;\/li&gt; \n                 &lt;li&gt;Speech data and resources for code-switching&lt;\/li&gt; \n                 &lt;li&gt;Language Identification from speech&lt;\/li&gt; \n                 &lt;p&gt;&lt;\/p&gt; \n                 &lt;p&gt;&lt;br&gt;&lt;\/p&gt;\n                 &lt;p&gt;&lt;span style=&quot;font-weight: bold;&quot;&gt;Organizing Committee:&lt;\/span&gt;&lt;br&gt;&lt;\/p&gt;\n                 &lt;p&gt;Kalika Bali, Microsoft Research India&lt;br&gt;Alan W Black, Carnegie Mellon University&lt;br&gt;Mona Diab, George Washington University&lt;br&gt;Julia Hirschberg, Columbia University&lt;br&gt;Sunayana Sitaram, Microsoft Research 
India&lt;br&gt;Thamar Solorio, University of Houston&lt;\/p&gt; \n                &lt;br&gt;&lt;br&gt;11.00-11.20 - Introduction&lt;br&gt;&lt;small&gt;Kalika Bali; Alan W Black&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Longitudinal Speaker Clustering and Verification Corpus with Code-Switching Frisian-Dutch Speech&lt;br&gt;&lt;small&gt;Emre Yilmaz; Jelske Dijkstra; Hans Van de Velde; Frederik Kampstra; Jouke Algra; Henk Van den Heuvel; David Van Leeuwen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Exploiting Untranscribed Broadcast Data for Improved Code-Switching Detection&lt;br&gt;&lt;small&gt;Emre Yilmaz; Henk van den Heuvel; David van Leeuwen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - Jee haan, I'd like both, por favor: Elicitation of a Code-Switched Corpus of Hindi-English and Spanish-English Human-Machine Dialog&lt;br&gt;&lt;small&gt;Vikram Ramanarayanan; David Suendermann-Oeft&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - On building mixed lingual speech synthesis systems&lt;br&gt;&lt;small&gt;SaiKrishna Rallabandi; Alan W Black&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Speech Synthesis for Mixed-Language Navigation Instructions&lt;br&gt;&lt;small&gt;Khyathi Chandu; Sai Krishna Rallabandi; Sunayana Sitaram; Alan W Black&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n            data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Speech Technology for Code-Switching in Multilingual Communities<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div 
class=\"event \"\n            data-id=\"4245\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Search, Computational Strategies and Language Modeling\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"19\"\n            data-room-id=\"1068\"\n            data-room-name=\"Poster 4\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Gy\u00f6rgy Szasz\u00e1k&lt;br&gt;&lt;br&gt;11.00-13.00 - A phonological phrase sequence modelling approach for resource efficient and robust real-time punctuation recovery&lt;br&gt;&lt;small&gt;Anna Mor\u00f3; Gy\u00f6rgy Szasz\u00e1k&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Binary Deep Neural Networks for Speech Recognition&lt;br&gt;&lt;small&gt;Xu Xiang; Yanmin Qian; Kai Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Comparison of Different Decoding Strategies for CTC Acoustic Models&lt;br&gt;&lt;small&gt;Thomas Zenkel; Ramon Sanabria; Florian Metze; Jan Niehues; Matthias Sperber; Sebastian St\u00fcker; Alex Waibel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Empirical Evaluation of Parallel Training Algorithms on Acoustic Modeling&lt;br&gt;&lt;small&gt;Wenpeng Li; Binbin Zhang; Lei Xie; Dong Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Estimation of Gap Between Current Language Models and Human Performance&lt;br&gt;&lt;small&gt;Xiaoyu Shen; Youssef Oualil; Clayton Greenberg; Mittul Singh; Dietrich Klakow&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Hierarchical Constrained Bayesian Optimization for Joint Feature, Acoustic Model and Decoder Parameter Optimization&lt;br&gt;&lt;small&gt;Akshay Chandrashekaran; Ian Lane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Joint Learning of Correlated Sequence Labeling Tasks Using Bidirectional Recurrent 
Neural Networks&lt;br&gt;&lt;small&gt;Vardaan Pahuja; Anirban Laha; Shachar Mirkin; Vikas Raykar; Lili Kotlerman; Guy Lev&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Phone duration modeling for LVCSR using neural networks&lt;br&gt;&lt;small&gt;Hossein Hadian; Daniel Povey; Hossein Sameti; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Rescoring-aware Beam Search for Reduced Search Errors in Contextual Automatic Speech Recognition&lt;br&gt;&lt;small&gt;Ian Williams; Aleksic Google, Inc&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Towards better decoding and language model integration in sequence to sequence models&lt;br&gt;&lt;small&gt;Jan Chorowski; Navdeep Jaitly&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Use of Global and Acoustic Features Associated with Contextual Factors to Adapt Language Models for Spontaneous Speech Recognition&lt;br&gt;&lt;small&gt;Shohei Toyama; Daisuke Saito; Nobuaki Minematsu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Architecture, Search, and Linguistic Components\"\n            data-category-ids=\"1061\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Search, Computational Strategies and Language Modeling<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4243\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Analysis and Representation 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            
data-room=\"16\"\n            data-room-id=\"1061\"\n            data-room-name=\"Poster 1\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Sekhar Seelamantula&lt;br&gt;&lt;br&gt;11.00-13.00 - A modulation property of time-frequency derivatives of filtered phase and its application to aperiodicity and FO estimation&lt;br&gt;&lt;small&gt;Hideki Kawahara; Ken-Ichi Sakakibara; Masanori Morise; Hideki Banno; Tomoki Toda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Analytic Filter Bank for Speech Analysis, Feature Extraction and Perceptual Studies&lt;br&gt;&lt;small&gt;Unto K. Laine&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - A Time-Warping Pitch Tracking Algorithm considering fast f0 changes&lt;br&gt;&lt;small&gt;Simon Stone; Peter Steiner; Peter Birkholz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Learning the mapping function from voltage amplitudes to sensor positions in 3D-EMA using deep neural networks&lt;br&gt;&lt;small&gt;Christian Kroos; Mark D. 
Plumbley&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Low-dimensional representation of spectral envelope without deterioration for full-band speech analysis\/synthesis system&lt;br&gt;&lt;small&gt;Masanori Morise; Kenji Ozawa; Genta Miayashita&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Non-Local Estimation of Speech Signal for Vowel Onset Point Detection in Varied Environments&lt;br&gt;&lt;small&gt;Avinash Kumar; Syed Shahnawazuddin; Gayadhar Pradhan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Robust Source-Filter Separation of Speech Signal in the Phase Domain&lt;br&gt;&lt;small&gt;Erfan Loweimi; Jon Barker; Oscar Saz Torralba; Thomas Hain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Time-domain envelope modulating the noise component of excitation in a continuous residual-based vocoder for statistical parametric speech synthesis&lt;br&gt;&lt;small&gt;Mohammed Salah Al-Radhi; Tam\u00e1s G\u00e1bor Csap\u00f3; G\u00e9za N\u00e9meth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Wavelet Speech Enhancement Based on Robust Principal Component Analysis&lt;br&gt;&lt;small&gt;Chia-Lung Wu; Hsiang-Ping Hsu; Syu-Siang Wang; Jeih-weih Hung; Ying-Hui Lai; Hsin-Min Wang; Yu Tsao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-13.00 - Vowel Onset Point Detection using Sonority Information&lt;br&gt;&lt;small&gt;Bidisha Sharma; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Speech and Audio Signals\"\n            data-category-ids=\"1062\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Analysis and Representation 2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            
\n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4233\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Conversational Telephone Speech Recognition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"11\"\n            data-yend=\"13\"\n            data-time=\"11:00-13:00\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Penny Karanasou; Ralf Schl\u00fcter&lt;br&gt;&lt;br&gt;11.00-11.20 - Improved Single System Conversational Telephone Speech Recognition with VGG Bottleneck Features&lt;br&gt;&lt;small&gt;William Hartmann; Roger Hsiao; Tim Ng; Jeff Ma; Francis Keith; Man-hung Siu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Student-teacher training with diverse decision tree ensembles&lt;br&gt;&lt;small&gt;Jeremy H. M. 
Wong; Mark Gales&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Embedding-Based Speaker Adaptive Training of Deep Neural Networks&lt;br&gt;&lt;small&gt;Xiaodong Cui; Vaibhava Goel; George Saon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.00-12.20 - Improving Deliverable Speech-to-text Systems with Multilingual Knowledge Transfer&lt;br&gt;&lt;small&gt;Jeff Ma; Francis Keith; Owen Kimball; Man-hung Siu; Tim Ng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.20-12.40 - English Conversational Telephone Speech Recognition by Humans and Machines&lt;br&gt;&lt;small&gt;George Saon; Gakuto Kurata; Tom Sercu; Kartik Audhkhasi; Samuel Thomas; Dimitrios Dimitriadis; Xiaodong Cui; Bhuvana Ramabhadran; Michael Picheny; Lynn-Li Lim; Bergul Roomi; Phil Hall&lt;\/small&gt;&lt;br&gt;&lt;br&gt;12.40-13.00 - Comparing Human and Machine Errors in Conversational Speech Transcription&lt;br&gt;&lt;small&gt;Andreas Stolcke; Jasha Droppo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Conversational Telephone Speech Recognition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  11:00-13:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5255\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Lunch\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13\"\n            data-yend=\"14.5\"\n            data-time=\"13:00-14:30\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            
data-room-name=\"Various locations\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Lunch<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:00-14:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4242\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Perception of Dialects and L2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Marija Tabain; Felicitas Kleber&lt;br&gt;&lt;br&gt;14.30-14.50 - End-to-End Acoustic Feedback in Language Learning for Correcting Devoiced French Final-Fricatives&lt;br&gt;&lt;small&gt;Sucheta Ghosh; Camille Fauth; Yves Laprie; Aghilas Sini&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Dialect perception by older children&lt;br&gt;&lt;small&gt;Ewa Jacewicz; Robert A. 
Fox&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Perception of non-contrastive variations in American English by Japanese learners: Flaps are less favored than stops&lt;br&gt;&lt;small&gt;Kiyoko Yoneyama; Mafuyu Kitahara; Keiichi Tajima&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - How L1 speakers perceive L2 prosody: The cumulative effect of intonation, rhythm, and speech rate on accentedness and comprehensibility ratings&lt;br&gt;&lt;small&gt;Lieke van Maastricht; Tim Zee; Emiel Krahmer; Marc Swerts&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Effects of Pitch Fall and L1 on Vowel Length Identification in L2 Japanese&lt;br&gt;&lt;small&gt;IZUMI TAKIGUCHI&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - A Preliminary Study of Prosodic Disambiguation by Chinese EFL Learners&lt;br&gt;&lt;small&gt;Yuanyuan Zhang; Hongwei Ding&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Perception of Dialects and L2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4249\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Prosody and Text Processing\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"19\"\n            data-room-id=\"1068\"\n            data-room-name=\"Poster 4\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            
data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Zofia Malisz&lt;br&gt;&lt;br&gt;14.30-16.30 - An RNN Model of Text Normalization&lt;br&gt;&lt;small&gt;Richard Sproat; Navdeep Jaitly&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Comparison of Modeling Target in LSTM-RNN Duration Model&lt;br&gt;&lt;small&gt;Bo Chen; Jiahao Lai; Kai Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Discrete Duration Model For Speech Synthesis&lt;br&gt;&lt;small&gt;Bo Chen; Tianling Bian; Kai Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Global Syllable Vectors for Building TTS Front-End with Deep Learning&lt;br&gt;&lt;small&gt;Jinfu Ni; Yoshinori Shiga; Hisashi Kawai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Investigating Efficient Feature Representation Methods and Training Objective for BLSTM-Based Phone Duration Prediction&lt;br&gt;&lt;small&gt;Yibin Zheng; Jianhua Tao; Zhengqi Wen; Ya Li; Bin Liu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Learning word vector representations based on acoustic counts&lt;br&gt;&lt;small&gt;Manuel Sam Ribeiro; Oliver Watts; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Multi-Task Learning for Prosodic Structure Generation using BLSTM RNN with Structured Output Layer&lt;br&gt;&lt;small&gt;Yuchen Huang; Zhiyong Wu; Runnan Li; Helen Meng; Lianhong Cai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Prosody Aware Word-level Encoder Based on BLSTM-RNNs for DNN-based Speech Synthesis&lt;br&gt;&lt;small&gt;Yusuke Ijima; Nobukatsu Hojo; Ryo Masumura; Taichi Asami&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Prosody Control of Utterance Sequence for Information Delivering&lt;br&gt;&lt;small&gt;Ishin Fukuoka; Kazuhiko Iwata; Tetsunori Kobayashi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Synthesising uncertainty: the interplay of vocal effort and hesitation disfluencies&lt;br&gt;&lt;small&gt;Eva Szekely; Joseph Mendelson; Joakim 
Gustafson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Weakly-Supervised Phrase Assignment from Text in a Speech-Synthesis System Using Noisy Labels&lt;br&gt;&lt;small&gt;Asaf Rendel; Raul Fernandez; Zvi Kons; Andrew Rosenberg; Ron Hoory; Bhuvana Ramabhadran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Synthesis and Spoken Language Generation\"\n            data-category-ids=\"1059\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Prosody and Text Processing<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4248\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Multi-lingual Models and Adaptation for ASR\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"18\"\n            data-room-id=\"1069\"\n            data-room-name=\"Poster 3\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Khe Chai Sim&lt;br&gt;&lt;br&gt;14.30-16.30 - 2016 BUT Babel system: Multilingual BLSTM acoustic model with i-vector based adaptation&lt;br&gt;&lt;small&gt;Martin Karafiat; Murali Karthick Baskar; Pavel Matejka; Karel Vesely; Frantisek Grezl; Lukas Burget; Jan \u010cernock\u00fd&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - An Investigation of Deep Neural Networks for Multilingual Speech Recognition Training and Adaptation&lt;br&gt;&lt;small&gt;Sibo Tong; 
Philip N. Garner; Herve Bourlard&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - CTC Training of Multi-Phone Acoustic Models for Speech Recognition&lt;br&gt;&lt;small&gt;Olivier Siohan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Deep Least Squares Regression for Speaker Adaptation&lt;br&gt;&lt;small&gt;Younggwan Kim; Hyungjun Lim; Jahyun Goo; Hoirin Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Factorised representations for neural network adaptation to diverse acoustic environments&lt;br&gt;&lt;small&gt;Joachim Fainberg; Steve Renals; Peter Bell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Generalized Distillation Framework For Speaker Normalization&lt;br&gt;&lt;small&gt;Neethu Mariam Joy; Sandeep Reddy Kothinti; Srinivasan Umesh; Basil Abraham&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Learning Factorized Transforms for Unsupervised Adaptation of LSTM-RNN Acoustic Models&lt;br&gt;&lt;small&gt;Lahiru Samarakoon; Brian Mak; Khe Chai Sim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Multilingual Recurrent Neural Networks with Residual Learning for Low-Resource Speech Recognition&lt;br&gt;&lt;small&gt;Shiyu Zhou; Yuanyuan Zhao; Shuang Xu; Bo Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Multi-task Learning using Mismatched Transcription for Under-resourced Speech Recognition&lt;br&gt;&lt;small&gt;Van Hai Do; Nancy F. 
Chen; Boon Pang Lim; Mark Hasegawa-Johnson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - OPTIMIZING DNN ADAPTATION FOR RECOGNITION OF ENHANCED SPEECH&lt;br&gt;&lt;small&gt;Marco Matassoni; Alessio Brutti; Falavigna Daniele&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Multi-lingual Models and Adaptation for ASR<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4247\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Production and Perception\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"17\"\n            data-room-id=\"1067\"\n            data-room-name=\"Poster 2\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Wentao Gu&lt;br&gt;&lt;br&gt;14.30-16.30 - Accurate Synchronization of Speech and EGG signal using Phase Information&lt;br&gt;&lt;small&gt;Sunil Kumar S B; K Sreenivasa Rao; Tanumay Mandal&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - An objective critical distance measure based on the relative level of spectral valley&lt;br&gt;&lt;small&gt;Ananthapadmanabha T V; Ramakrishnan Angarai Ganesan; Shubham Sharma&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - 
Audiovisual recalibration of vowel categories&lt;br&gt;&lt;small&gt;Matthias Franken; Frank Eisner; Jan-Mathijs Schoffelen; Dan Acheson; Peter Hagoort; James McQueen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Auditory-visual integration of talker gender in Cantonese tone perception&lt;br&gt;&lt;small&gt;Wei Lai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Critical articulators identification from RT-MRI of the vocal tract&lt;br&gt;&lt;small&gt;Samuel Silva; Ant\u00f3nio Teixeira&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Cross-modal Analysis between Phonation Differences and Texture Images based on Sentiment Correlations&lt;br&gt;&lt;small&gt;Win Thuzar Kyaw; Yoshinori Sagisaka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Database of volumetric and real-time vocal tract MRI for speech science&lt;br&gt;&lt;small&gt;Tanner Sorensen; Zisis Iason Skordilis; Asterios Toutios; Yoon-Chul Kim; Yinghua Zhu; Jangwon Kim; Adam Lammert; Vikram Ramanarayanan; Louis Goldstein; Dani Byrd; Krishna Nayak; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Event-related potentials associated with somatosensory effect in audio-visual speech perception&lt;br&gt;&lt;small&gt;Takayuki Ito; Hiroki Ohashi; Eva Montas; Vincent Gracco&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Semantic Edge Detection for Tracking Vocal Tract Air-tissue Boundaries in Real-time Magnetic Resonance Images&lt;br&gt;&lt;small&gt;Krishna Somandepalli; Asterios Toutios; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The acquisition of focal lengthening in Stockholm Swedish&lt;br&gt;&lt;small&gt;Anna Sara Hexeberg Rom\u00f8ren; Aoju Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The effect of gesture on persuasive speech&lt;br&gt;&lt;small&gt;Judith Peters; Marieke Hoetjes&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The Influence on Realization and Perception of Lexical Tones from Affricate's Aspiration&lt;br&gt;&lt;small&gt;Chong Cao; Yanlu Xie; 
Qi Zhang; Jinsong Zhang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - When a dog is a cat and how it changes your pupil size: Pupil dilation in response to information mismatch&lt;br&gt;&lt;small&gt;Lena F. Renner; Marcin Wlodarczak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Video-based tracking of jaw movements during speech: Preliminary results and future directions&lt;br&gt;&lt;small&gt;Andrea Bandini; Aravind Namasivayam; Yana Yunusova&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Wireless neck-surface accelerometer and microphone on flex circuit with application to noise-robust monitoring of Lombard speech&lt;br&gt;&lt;small&gt;Daryush Mehta; Patrick Chwalek; Thomas Quatieri; Laura Brattain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Vocal Tract Airway Tissue Boundary Tracking for rtMRI using Shape and Appearance Priors&lt;br&gt;&lt;small&gt;Sasan Asadiabadi; Engin Erzin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n            data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Production and Perception<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4238\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Neural Networks for Language Modeling\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n      
      data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tanel Alum\u00e4e; Xunying Liu&lt;br&gt;&lt;br&gt;14.30-14.50 - Approaches for Neural-Network Language Model Adaptation&lt;br&gt;&lt;small&gt;Min Ma; Michael Nirschl; Fadi Biadsy; Shankar Kumar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - A Batch Noise Contrastive Estimation Approach for Training Large Vocabulary Language Models&lt;br&gt;&lt;small&gt;Youssef Oualil; Dietrich Klakow&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Investigating Bidirectional Recurrent Neural Network Language Models for Speech Recognition&lt;br&gt;&lt;small&gt;Xie Chen; Anton Ragni; Xunying Liu; Mark Gales&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - FAST NEURAL NETWORK LANGUAGE MODEL LOOKUPS AT N-GRAM SPEEDS&lt;br&gt;&lt;small&gt;Yinghui Huang; Abhinav Sethy; Bhuvana Ramabhadran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Empirical Exploration of Novel Architectures and Objectives for Language Models&lt;br&gt;&lt;small&gt;Gakuto Kurata; Abhinav Sethy; Bhuvana Ramabhadran; George Saon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - Residual Memory Networks in Language Modeling: Improving the Reputation of Feed-Forward Networks&lt;br&gt;&lt;small&gt;Karel Bene\u0161; Murali Baskar; Luk\u00e1\u0161 Burget&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Architecture, Search, and Linguistic Components\"\n            data-category-ids=\"1061\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Neural Networks for Language Modeling<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n     
         \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4246\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Perception\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"16\"\n            data-room-id=\"1061\"\n            data-room-name=\"Poster 1\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Louis ten Bosch&lt;br&gt;&lt;br&gt;14.30-16.30 - Emotional thin-slicing: a proposal for a short- and long-term division of emotional speech&lt;br&gt;&lt;small&gt;Daniel Oliveira Peres; Dominic Watt; Waldemar Ferreira Netto&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Factors Affecting the Intelligibility of Low-pass Filtered Speech&lt;br&gt;&lt;small&gt;Lei Wang; Fei Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Lexically Guided Perceptual Learning in Mandarin Chinese&lt;br&gt;&lt;small&gt;L. 
Ann Burchfield; San-hei Kenny Luk; Mark Antoniou; Anne Cutler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Misperceptions of the emotional content of natural and vocoded speech in a car&lt;br&gt;&lt;small&gt;Jaime Lorenzo-Trueba; Cassia Valentini-Botinhao; Gustav Eje Henter; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Perception and acoustics of vowel nasality in Brazilian Portuguese&lt;br&gt;&lt;small&gt;Luciana Marques; Rebecca Scarborough&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Phonetic Restoration of Temporally Reversed Speech&lt;br&gt;&lt;small&gt;Shi-yu Wang; Fei Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Predicting epenthetic vowel quality from acoustics&lt;br&gt;&lt;small&gt;Adriana Guevara-Rukoz; Erika Parlato-Oliveira; Shi Yu; Yuki Hirose; Sharon Peperkamp; Emmanuel Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Simultaneous articulatory and acoustic distortion in L1 and L2 Listening: Locally time-reversed \u201cfast\u201d speech&lt;br&gt;&lt;small&gt;Mako Ishida&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Sociophonetic realizations guide subsequent lexical access&lt;br&gt;&lt;small&gt;Jonny Kim; Katie Drager&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The effect of spectral profile on the intelligibility of emotional speech in noise&lt;br&gt;&lt;small&gt;Chris Davis; Chee Seng Chong; Jeesun Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The effect of spectral tilt on size discrimination of voiced speech sounds&lt;br&gt;&lt;small&gt;Toshie Matsui; Toshio Irino; Kodai Yamamoto; Hideki Kawahara; Roy Patterson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - The relative cueing power of F0 and duration in German prominence perception&lt;br&gt;&lt;small&gt;Oliver Niebuhr; Jana Winkler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Whether long-term tracking of speech rate affects perception depends on who is talking&lt;br&gt;&lt;small&gt;Merel Maslowski; Antje S. 
Meyer; Hans Rutger Bosker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n            data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Perception<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4253\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"12\"\n            data-room-id=\"1071\"\n            data-room-name=\"E397\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"14.30-16.30 - An apparatus to investigate western opera singing skill learning using performance and result biofeedback, and measuring its neural correlates&lt;br&gt;&lt;small&gt;Aurore Jaumard-Hakoun; Samy Chikhi; Takfarinas Medani; Angelika Nair; G\u00e9rard Dreyfus; Fran\u00e7ois-Beno\u00eet Vialatte&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Emojive! 
Collecting Emotion Data from Speech and Facial Expression using Mobile Game App&lt;br&gt;&lt;small&gt;Ji Ho Park; Nayeon Lee; Dario Bertero; Anik Dey; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - PercyConfigurator -- Perception Experiments as a Service&lt;br&gt;&lt;small&gt;Christoph Draxler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - System for speech transcription and post-editing in Microsoft Word&lt;br&gt;&lt;small&gt;Askars Salimbajevs; Indra Ikauniece&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Visual Learning 2: Pronunciation app using ultrasound, video, and MRI&lt;br&gt;&lt;small&gt;Kyori Suzuki; Ian Wilson; Hayato Watanabe&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4241\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Analysis and Representation 1\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            
data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hema Murthy; Jon Barker&lt;br&gt;&lt;br&gt;14.30-14.50 - Phone Classification using a Non-Linear Manifold with Broad Phone Class Dependent DNNs&lt;br&gt;&lt;small&gt;Linxue Bai; Peter Jancovic; Martin Russell; Philip Weber; Steve Houghton&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - An Investigation of Crowd Speech for Room Occupancy Estimation&lt;br&gt;&lt;small&gt;Siyuan Chen; Julien Epps; Eliathamby Ambikairajah; Phu Le&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Time-Frequency Coherence for Periodic-Aperiodic Decomposition of Speech Signals&lt;br&gt;&lt;small&gt;Karthika Vijayan; Jitendra Dhiman; Chandra Sekhar Seelamantula&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - Musical Speech: a New Methodology for Transcribing Speech Prosody&lt;br&gt;&lt;small&gt;Alexsandro Meireles; Ant\u00f4nio Sim\u00f5es; Antonio Celso Ribeiro; Beatriz Raposo de Medeiros&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Estimation of Place of Articulation of Fricatives from Spectral Characteristics for Speech Training&lt;br&gt;&lt;small&gt;K S Nataraj; Prem C. 
Pandey; Hirak Dasgupta&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - Estimation of the Probability Distribution of Spectral Fine Structure in the Speech Source&lt;br&gt;&lt;small&gt;Tom B\u00e4ckstr\u00f6m&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Speech and Audio Signals\"\n            data-category-ids=\"1062\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Analysis and Representation 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4257\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Interspeech 2017 Automatic Speaker Verification Spoofing and Countermeasures Challenge 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Nicholas Evans; Kong Aik Lee&lt;br&gt;&lt;br&gt;14.30-14.45 - Audio replay attack detection with deep learning frameworks&lt;br&gt;&lt;small&gt;Galina Lavrentyeva; Sergey Novoselov; Egor Malykh; Alexandr Kozlov; Oleg Kudashev; Vadim Shchemelinin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.45-15.00 - Ensemble learning for countermeasure of audio replay spoofing attack in ASVspoof2017&lt;br&gt;&lt;small&gt;Zhe Ji; Zhi-Yi Li; Peng Li; Maobo An; Shengxiang Gao; Dan Wu; 
Faru Zhao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.00-15.15 - A Study on Replay Attack and Anti-Spoofing for Automatic Speaker Verification&lt;br&gt;&lt;small&gt;Lantian Li; Yixiang Chen; Dong Wang; Thomas Fang Zheng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.15-15.30 - Replay Attack Detection using DNN for Channel Discrimination&lt;br&gt;&lt;small&gt;Parav Nagarsheth; Elie Khoury; Kailash Patil; Matt Garland&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.45 - ResNet and Model Fusion for Automatic Spoofing Detection&lt;br&gt;&lt;small&gt;Zhuxin Chen; Zhifeng Xie; Weibin Zhang; Xiangmin Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.45-16.00 - SFF Anti-Spoofer: IIIT-H Submission for Automatic Speaker Verification Spoofing and Countermeasures Challenge 2017&lt;br&gt;&lt;small&gt;K N R K Raju Alluri; Sivanand Achanta; Sudarsana Reddy Kadiri; Suryakanth V Gangashetty; Anil Kumar Vuppala&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-16.30 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Interspeech 2017 Automatic Speaker Verification Spoofing and Countermeasures Challenge 2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5241\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Speech Technology for Code-Switching in Multilingual Communities\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            
data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"14\"\n            data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Alan W Black; Kalika Bali&lt;br&gt;&lt;br&gt;&lt;p&gt;See full description at: https:\/\/www.microsoft.com\/en-us\/research\/event\/interspeech-2017-special-session-speech-technologies-for-code-switching-in-multilingual-communities\/#&lt;\/p&gt;&lt;p&gt;Topics of interest for this special session will include but are not limited to:&amp;nbsp;&lt;br&gt;&lt;\/p&gt;&lt;li&gt;Speech Recognition of code-switched speech&lt;\/li&gt;&lt;li&gt;Language Modeling for code-switched speech&lt;\/li&gt;&lt;li&gt;Speech Synthesis of code-switched text&lt;\/li&gt;&lt;li&gt;Speech Translation of code-switched languages&lt;\/li&gt;&lt;li&gt;Spoken Dialogue Systems that can handle code-switching&lt;\/li&gt;&lt;li&gt;Speech data and resources for code-switching&lt;\/li&gt;&lt;li&gt;Language Identification from speech&lt;\/li&gt;&lt;p&gt;&lt;\/p&gt;&lt;p&gt;&lt;br&gt;&lt;\/p&gt;&lt;p&gt;Organizing Committee:&lt;br&gt;&lt;\/p&gt;&lt;p&gt;Kalika Bali, Microsoft Research India&lt;br&gt;Alan W Black, Carnegie Mellon University&lt;br&gt;Mona Diab, George Washington University&lt;br&gt;Julia Hirschberg, Columbia University&lt;br&gt;Sunayana Sitaram, Microsoft Research India&lt;br&gt;Thamar Solorio, University of Houston&lt;\/p&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Addressing Code-Switching in French\/Algerian Arabic Speech&lt;br&gt;&lt;small&gt;Amazouz Djegdjiga; Martine Adda-Decker; Lori Lamel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Metrics for modeling code-switching across corpora&lt;br&gt;&lt;small&gt;Wally Guzman; Joseph Ricard; Jacqueline Serigos; Barbara Bullock; Almeida Jacqueline Toribio&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - 
Synthesising isiZulu-English code-switch bigrams using word embeddings&lt;br&gt;&lt;small&gt;Ewald Van der westhuizen; Thomas Niesler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - Crowdsourcing Universal Part-Of-Speech Tags for Code-Switching&lt;br&gt;&lt;small&gt;Victor Soto; Julia Hirschberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.30 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"\"\n            data-category-ids=\"\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Speech Technology for Code-Switching in Multilingual Communities<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4239\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Far-field Speech Recognition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Thomas Hain; Zheng-Hua Tan&lt;br&gt;&lt;br&gt;14.30-14.50 - Generation of simulated utterances in virtual rooms to train deep-neural networks for far-field speech recognition in Google Home&lt;br&gt;&lt;small&gt;Chanwoo Kim; Ananya Misra; K.K. 
Chin; Thad Hughes; Arun Narayanan; Tara Sainath; Michiel Bacchiani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Neural network-based spectrum estimation for online WPE dereverberation&lt;br&gt;&lt;small&gt;Keisuke Kinoshita; Marc Delcroix; Haeyong Kwon; Takuma Mori; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Factorial Modeling for Effective Suppression of Directional Noise&lt;br&gt;&lt;small&gt;Osamu Ichikawa; Takashi Fukuda; Gakuto Kurata; Steven Rennie&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - On Design of Robust Deep Models for CHiME-4 Multi-Channel Speech Recognition with Multiple Configurations of Array Microphones&lt;br&gt;&lt;small&gt;Yan-Hui Tu; Jun Du; Lei Sun; Feng Ma; Chin-Hui Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Acoustic Modeling for Google Home&lt;br&gt;&lt;small&gt;Bo Li; Tara Sainath; Joe Caroselli; Arun Narayanan; Michiel Bacchiani; Ananya Misra; Izhak Shafran; Hasim Sak; Golan Pundak; K.K. Chin; Khe Chai Sim; Ron Weiss; Kevin Wilson; Ehsan Variani; Chanwoo Kim; Olivier Siohan; Mitchell Weintraub; Erik McDermott; Richard Rose; Matt Shannon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - On multi-domain training and adaptation of end-to-end RNN acoustic models for distant speech recognition&lt;br&gt;&lt;small&gt;Seyedmahdad Mirsamadi; John H.L. 
Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Far-field Speech Recognition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4254\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Swedish Kulning (SweKul). What's so Special About Kulning - The Singing Technique in Traditional Swedish Cattle Calls?\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"2\"\n            data-room-id=\"1072\"\n            data-room-name=\"B3\"\n            data-day=\"1\"\n            data-abs-path=\"\/abs\/4254.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Special event\"\n            data-category-ids=\"1064\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Swedish Kulning (SweKul). 
What's so Special About Kulning - The Singing Technique in Traditional Swedish Cattle Calls?<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4240\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Pathological Speech and Language\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Heidi Christensen; Rafa Orozco&lt;br&gt;&lt;br&gt;14.30-14.50 - Dominant Distortion Classification for Pre-Processing of Vowels in Remote Biomedical Voice Analysis&lt;br&gt;&lt;small&gt;Amir Hossein Poorjam; Jesper Rindom Jensen; Max A. 
Little; Mads Gr\u00e6sb\u00f8ll Christensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Automatic Paraphasia Detection from Aphasic Speech: A Preliminary Study&lt;br&gt;&lt;small&gt;Duc Le; Keli Licata; Emily Mower Provost&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Evaluation of the neurological state of people with Parkinson\u2019s disease using i-vectors&lt;br&gt;&lt;small&gt;Nicanor Garcia; Juan Rafael Orozco-Arroyave; Luis Fernando D'Haro; Najim Dehak; Elmar Noeth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.30-15.50 - Objective Severity Assessment From Disordered Voice Using Estimated Glottal Airflow&lt;br&gt;&lt;small&gt;Yu-Ren Chien; Michal Borsky; Jon Gudnason&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.50-16.10 - Earlier Identification of Children with Autism Spectrum Disorder: An Automatic Vocalisation-based Approach&lt;br&gt;&lt;small&gt;Florian Pokorny; Bj\u00f6rn Schuller; Peter Marschik; Raymond Brueckner; P\u00e4r Nystr\u00f6m; Nicholas Cummins; Sven B\u00f6lte; Christa Einspieler; Terje Falck-Ytter&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-16.30 - Convolutional Neural Network to Model Articulation Impairments in Patients with Parkinson's Disease&lt;br&gt;&lt;small&gt;Juan Camilo V\u00e1squez Correa; Juan Rafael Orozco-Arroyave; Elmar Noeth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Pathological Speech and Language<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            
data-id=\"4252\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 1\"\n            data-abs-nbr=\"\"\n            data-ystart=\"14.5\"\n            data-yend=\"16.5\"\n            data-time=\"14:30-16:30\"\n            data-room=\"11\"\n            data-room-id=\"1070\"\n            data-room-name=\"E306\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"14.30-16.30 - ChunkitApp: Investigating the relevant units of online speech processing&lt;br&gt;&lt;small&gt;Svetlana Vetchinnikova; Anna Mauranen; Nina Mikusova&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Extending the EMU Speech Database Management System: Cloud Hosting, Team Collaboration, Automatic Revision Control&lt;br&gt;&lt;small&gt;Markus Jochim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - HomeBank: A repository for long-form real-world audio recordings of children&lt;br&gt;&lt;small&gt;Anne Warlaumont; Mark vanDam; Elika Bergelson; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - MoPAReST - Mobile Phone Assisted Remote Speech Therapy Platform&lt;br&gt;&lt;small&gt;Chitralekha Bhat; Anjali Kant; Bhavik Vachhani; Sarita Rautara; Ashok Kumar Sinha; Sunil Kumar Kopparapu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-16.30 - Prosograph: A Tool for Prosody Visualisation of Large Speech Corpora&lt;br&gt;&lt;small&gt;Alp Oktem; Mireia Farr\u00fas; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  14:30-16:30     
           <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5249\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"ISCA General Assembly and Refreshments\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16.5\"\n            data-yend=\"18\"\n            data-time=\"16:30-18:00\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>ISCA General Assembly and Refreshments<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Aula Magna<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:30-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5250\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Welcome Reception\"\n            data-abs-nbr=\"\"\n            data-ystart=\"19\"\n            data-yend=\"20.5\"\n            data-time=\"19:00-20:30\"\n            data-room=\"24\"\n            data-room-id=\"1116\"\n            data-room-name=\"Stockholm City Hall and Teaterbaren\"\n            data-day=\"1\"\n            data-abs-path=\"\"\n            
data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Social event\"\n            data-category-ids=\"1067\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Welcome Reception<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Stockholm City Hall and Teaterbaren<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  19:00-20:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#C9EE91; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5246\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Registration\"\n            data-abs-nbr=\"\"\n            data-ystart=\"7.75\"\n            data-yend=\"17\"\n            data-time=\"07:45-17:00\"\n            data-room=\"23\"\n            data-room-id=\"1112\"\n            data-room-name=\"S\u00f6dra Huset, House A\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"\"\n            data-category-ids=\"\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Registration<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  07:45-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n  
        <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4289\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Keynote 1: James Allen, Dialogue as Collaborative Problem Solving\"\n            data-abs-nbr=\"\"\n            data-ystart=\"8.5\"\n            data-yend=\"9.5\"\n            data-time=\"08:30-09:30\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"2\"\n            data-abs-path=\"\/abs\/4289.html\"\n            data-speaker=\"James Allen\"\n            data-speakercell=\"James Allen\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Joakim Gustafson&lt;br&gt;&lt;br&gt;\n                 &lt;span style=&quot;font-weight: normal;&quot;&gt;The session will also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;\/span&gt;\n                 &lt;br&gt;\n                &lt;br&gt;&lt;br&gt;\"\n            data-category=\"Keynote\"\n            data-category-ids=\"1057\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Keynote 1: James Allen, Dialogue as Collaborative Problem Solving<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Aula Magna<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  08:30-09:30                <\/span><\/div>\n\n              <div class=\"lecturer\"><span>James Allen<\/span><\/div>\n            <\/div>\n\n\n                                <div style=\"background-color:#72D9EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5257\"\n            data-project=\"project_242_2017_01_12\"\n            
data-title=\"Refreshments\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9.5\"\n            data-yend=\"10\"\n            data-time=\"09:30-10:00\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Refreshments<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  09:30-10:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4308\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Short Utterances Speaker Recognition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"16\"\n            data-room-id=\"1061\"\n            data-room-name=\"Poster 1\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Anthony Larcher&lt;br&gt;&lt;br&gt;10.00-12.00 - Adversarial Network Bottleneck Features for Noise Robust Speaker Verification&lt;br&gt;&lt;small&gt;Hong Yu; Zheng-Hua Tan; Zhanyu Ma; Jun Guo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Content Normalization for Text-dependent Speaker Verification&lt;br&gt;&lt;small&gt;Subhadeep Dey; Srikanth Madikeri; Petr Motlicek; Marc 
Ferras&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Deep Speaker Embeddings for Short-Duration Speaker Verification&lt;br&gt;&lt;small&gt;Gautam Bhattacharya; Md Jahangir Alam; Patrick Kenny&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - DNN i-vector Speaker Verification with Short, Text-constrained Test Utterances&lt;br&gt;&lt;small&gt;Jinghua Zhong; Wenping Hu; Frank Soong; Helen Meng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - End-to-End Text-Independent Speaker Verification with Triplet Loss on Short Utterances&lt;br&gt;&lt;small&gt;Chunlei Zhang; Kazuhito Koishida&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Gain Compensation for Fast I-Vector Extraction over Short Duration&lt;br&gt;&lt;small&gt;Kong Aik Lee; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Incorporating Local Acoustic Variability Information into Short Duration Speaker Verification&lt;br&gt;&lt;small&gt;Jianbo Ma; Vidhyasaharan Sethu; Eliathamby Ambikairajah; Kong Aik Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Joint Training of Expanded End-to-end DNN for Text-dependent Speaker Verification&lt;br&gt;&lt;small&gt;Hee-Soo Heo; Jee-Weon Jung; IL-Ho Yang; Sung-Hyun Yoon; Ha-Jin Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Time-Varying Autoregressions for Speaker Verification in Reverberant Conditions&lt;br&gt;&lt;small&gt;Ville Vestman; Dhananjaya Gowda; Md Sahidullah; Paavo Alku; Tomi Kinnunen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Using Voice Quality Features to Improve Short-Utterance, Text-Independent Speaker Verification Systems&lt;br&gt;&lt;small&gt;Soo Jin Park; Gary Yeung; Jody Kreiman; Patricia Keating; Abeer Alwan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - What Does the Speaker Embedding Encode?&lt;br&gt;&lt;small&gt;Shuai Wang; Yanmin Qian; Kai Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                
      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Short Utterances Speaker Recognition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4293\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speaker Recognition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Jean-Francois Bonastre; Kornel Laskowski&lt;br&gt;&lt;br&gt;10.00-10.20 - Deep Neural Network Embeddings for Text-Independent Speaker Verification&lt;br&gt;&lt;small&gt;David Snyder; Daniel Garcia-Romero; Dan Povey; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Tied Variational Autoencoder Backends for i-Vector Speaker Recognition&lt;br&gt;&lt;small&gt;Jesus Villalba; Niko Brummer; Najim Dehak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Improved Gender Independent Speaker Recognition Using Convolutional Neural Network Based Bottleneck Features&lt;br&gt;&lt;small&gt;Shivesh Ranjan; John H.L. 
Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Autoencoder based Domain Adaptation for Speaker Recognition under Insufficient Channel Information&lt;br&gt;&lt;small&gt;Suwon Shon; Seongkyu Mun; Wooil Kim; Hanseok Ko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Nonparametrically Trained Probabilistic Linear Discriminant Analysis for i-Vector Speaker Verification&lt;br&gt;&lt;small&gt;Abbas Khosravani; Mohammad Mehdi Homayounpour&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - DNN bottleneck features for speaker clustering&lt;br&gt;&lt;small&gt;Jes\u00fas Jorr\u00edn; Leibny Paola Garcia Perera; Luis Buera&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speaker Recognition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4291\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Emotion Recognition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Elmar N\u00f6th; Shrikanth Narayanan&lt;br&gt;&lt;br&gt;10.00-10.20 - Efficient Emotion Recognition from Speech Using Deep Learning on 
Spectrograms&lt;br&gt;&lt;small&gt;Aharon Satt; Shai Rozenberg; Ron Hoory&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Interaction and Transition Model for Speech Emotion Recognition in Dialogue&lt;br&gt;&lt;small&gt;Ruo Zhang; Atsushi Ando; Satoshi Kobashikawa; Yushi Aono&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Progressive Neural Networks for Transfer Learning in Emotion Recognition&lt;br&gt;&lt;small&gt;John Gideon; Soheil Khorram; Zakaria Aldeneh; Dimitrios Dimitriadis; Emily Mower Provost&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Jointly Predicting Arousal, Valence and Dominance with Multi-Task Learning&lt;br&gt;&lt;small&gt;Srinivas Parthasarathy; Carlos Busso&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Discretized Continuous Speech Emotion Recognition with Multi-Task Deep Recurrent Neural Network&lt;br&gt;&lt;small&gt;Duc Le; Zakaria Aldeneh; Emily Mower Provost&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Towards Speech Emotion Recognition \u201cin the wild\u201d using Aggregated Corpora and Deep Multi-Task Learning&lt;br&gt;&lt;small&gt;Jaebok Kim; Gwenn Englebienne; Khiet Truong; Vanessa Evers&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Emotion Recognition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4318\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 4\"\n            data-abs-nbr=\"\"\n    
        data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"12\"\n            data-room-id=\"1071\"\n            data-room-name=\"E397\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"10.00-12.00 - Combining Gaussian mixture models and segmental feature models for speaker recognition&lt;br&gt;&lt;small&gt;Milana Milo\u0161evi\u0107; Ulrike Glavitsch&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Did you laugh enough today? - Deep Neural Networks for Mobile and Wearable Laughter Trackers&lt;br&gt;&lt;small&gt;Gerhard Hagerer; Nicholas Cummins; Florian Eyben; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Evolving recurrent neural networks that process and classify raw audio in a streaming fashion&lt;br&gt;&lt;small&gt;Adrien DANIEL&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Low-Frequency Ultrasonic Communication for Speech Broadcasting in Public Transportation&lt;br&gt;&lt;small&gt;Kwang Myung Jeon; Nam Kyun Kim; Chan Woong Kwak; Jung Min Moon; Hong Kook Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Reading validation for pronunciation evaluation in the Digitala project&lt;br&gt;&lt;small&gt;Aku Rouhe; Reima Karhila; Peter Smit; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Real-time Speech Enhancement with GCC-NMF: Demonstration on the Raspberry Pi and NVIDIA Jetson&lt;br&gt;&lt;small&gt;Sean Wood; Jean Rouat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 4<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n              
  <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4324\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Speech and Human-Robot Interaction\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"14\"\n            data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;G\u00e9rard Bailly; Gabriel Skantze&lt;br&gt;&lt;br&gt;10.00-10.15 - Introduction&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.15-10.30 - Elicitation Design for Acoustic Depression Classification: An Investigation of Articulation Effort, Linguistic Complexity, and Word Affect&lt;br&gt;&lt;small&gt;Brian Stasak; Julien Epps; Roland Goecke&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.30-10.45 - Robustness over time-varying channels in DNN-HMM ASR based human-robot interaction&lt;br&gt;&lt;small&gt;Jose Novoa; Jorge Wuth; Juan Pablo Escudero; Josue Fredes; Rodrigo Mahu; Richard Stern; Nestor Becerra Yoma&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.45-11.00 - Analysis of Engagement and User Experience with a Laughter Responsive Social Robot&lt;br&gt;&lt;small&gt;Bekir Berker T\u00fcrker; Zana Bu\u00e7inca; Engin Erzin; Y\u00fccel Yemez; Metin Sezgin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.15 - Automatic Classification of Autistic Child Vocalisations: A Novel Database and Results&lt;br&gt;&lt;small&gt;Alice Baird; Shahin 
Amiriparian; Nicholas Cummins; Alyssa M. Alcorn; Anton Batliner; Sergey Pugachevskiy; Michael Freitag; Mauric Gerczuk; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.15-11.30 - Crowd-Sourced Design of Artificial Attentive Listeners&lt;br&gt;&lt;small&gt;Catharine Oertel; Patrik Jonell; Dimosthenis Kontogiorgos; Joseph Mendelson; Jonas Beskow; Joakim Gustafson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.30-11.45 - Studying the link between inter-speaker coordination and speech imitation through human-machine interactions&lt;br&gt;&lt;small&gt;Leonardo Lancia; Thierry Chaminade; No\u00ebl Nguyen; Laurent Pr\u00e9vot&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.45-12.00 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n            data-category-ids=\"1051\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Speech and Human-Robot Interaction<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4295\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Synthesis Prosody\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mirjam Wester; Prasanta 
Ghosh&lt;br&gt;&lt;br&gt;10.00-10.20 - An RNN-based Quantized F0 Model with Multi-tier Feedback Links for Text-to-Speech Synthesis&lt;br&gt;&lt;small&gt;Xin Wang; Shinji Takaki; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Phrase break prediction for long-form reading TTS: exploiting the text structure information&lt;br&gt;&lt;small&gt;Viacheslav Klimkov; Adam Nadolski; Alexis Moinet; Bartosz Putrycz; Roberto Barra-Chicote; Thomas Merritt; Thomas Drugman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Physically constrained statistical F0 prediction for electrolaryngeal speech enhancement&lt;br&gt;&lt;small&gt;Kou Tanaka; Hirokazu Kameoka; Tomoki Toda; Satoshi Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - DNN-SPACE: DNN-HMM-based Generative Model of Voice $F_0$ Contours for Statistical Phrase\/Accent Command Estimation&lt;br&gt;&lt;small&gt;Nobukatsu Hojo; Ohsugi Yasuhito; Yusuke Ijima; Hirokazu Kameoka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Controlling prominence realisation in parametric DNN-based speech synthesis.&lt;br&gt;&lt;small&gt;Zofia Malisz; Harald Berthelsen; Jonas Beskow; Joakim Gustafson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Increasing Recall of Lengthening Detection via Semi-Automatic Classification&lt;br&gt;&lt;small&gt;Simon Betz; Jana Vo\u00dfe; Sina Zarrie\u00df; Petra Wagner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Synthesis and Spoken Language Generation\"\n            data-category-ids=\"1059\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Synthesis Prosody<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n 
         <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4292\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Models of Speech Production\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Marcin Wlodarczak; Daryush Mehta&lt;br&gt;&lt;br&gt;10.00-10.20 - Functional principal component analysis of vocal tract area functions&lt;br&gt;&lt;small&gt;Jorge Lucero&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Analysis of acoustic-to-articulatory speech inversion across different accents and languages&lt;br&gt;&lt;small&gt;Ganesh Sivaraman; Carol Espy-Wilson; Martijn Wieling&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Integrated mechanical model of [r]-[l] and [b]-[m]-[w] producing consonant cluster [br]&lt;br&gt;&lt;small&gt;Takayuki Arai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - A Speaker Adaptive DNN Training Approach for Speaker-independent Acoustic Inversion&lt;br&gt;&lt;small&gt;Leonardo Badino; Luca Franceschi; Raman Arora; Michele Donini; Massimiliano Pontil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Acoustic-to-articulatory mapping based on mixture of probabilistic canonical correlation analysis&lt;br&gt;&lt;small&gt;Hidetsugu Uchida; Daisuke Saito; Nobuaki Minematsu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Test-retest repeatability of articulatory strategies using real-time magnetic resonance imaging&lt;br&gt;&lt;small&gt;Tanner Sorensen; Asterios Toutios; Johannes Toger; Louis Goldstein; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n       
     data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Models of Speech Production<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4317\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 3\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"11\"\n            data-room-id=\"1070\"\n            data-room-name=\"E306\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"10.00-12.00 - Applications of the BBN Sage Speech Processing Platform&lt;br&gt;&lt;small&gt;Ralf Meermeier; Sean Colbath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - A Signal Processing Approach for Speaker Separation using SFF Analysis&lt;br&gt;&lt;small&gt;Nivedita Chennupati; Narayana Murthy BHVS; Bayya Yegnanarayana&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Bob Speaks Kaldi&lt;br&gt;&lt;small&gt;Milos Cernak; Alain Komaty; Amir Mohammadi; Andre Anjos; Sebastien Marcel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - MetaLab: A repository for meta-analyses on language development, and more&lt;br&gt;&lt;small&gt;Sho Tsuji; Christina Bergmann; Molly Lewis; Mika Braginsky; Page Piccinini; Michael C. 
Frank; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Real time pitch shifting with formant structure preservation using the phase vocoder&lt;br&gt;&lt;small&gt;Micha\u0142 Lenarczyk&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Speech Recognition and Understanding on Hardware-Accelerated DSP&lt;br&gt;&lt;small&gt;Georg Stemmer; Munir Georges; Joachim Hofer; Piotr Rozen; Josef Bauer; Jakub Nowicki; Tobias Bocklet; Hannah Colett; Ohad Falik; Michael Deisher; Sylvia Downing&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 3<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4309\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speaker Characterization and Recognition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"17\"\n            data-room-id=\"1067\"\n            data-room-name=\"Poster 2\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Michael Wagner&lt;br&gt;&lt;br&gt;10.00-12.00 - A Distribution Free Formulation of the 
Total Variability Model&lt;br&gt;&lt;small&gt;Ruchir Travadi; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Alternative Approaches to Neural Network based Speaker Verification&lt;br&gt;&lt;small&gt;Anna Silnova; Lukas Burget; Jan \u010cernock\u00fd&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Analysis of Score Normalization in Multilingual Speaker Recognition&lt;br&gt;&lt;small&gt;Pavel Matejka; Oldrich Plchot; Ond\u0159ej Novotn\u00fd; Lukas Burget; Mireia Diez S\u00e1nchez; Jan \u010cernock\u00fd&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Deep Speaker Feature Learning for Text-independent Speaker Verification&lt;br&gt;&lt;small&gt;Lantian Li; Yixiang Chen; Ying Shi; Zhiyuan Tang; Dong Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Domain mismatch modeling of out-domain i-vectors for PLDA speaker verification&lt;br&gt;&lt;small&gt;Md Hafizur Rahman; Ivan Himawan; David Dean; Sridha Sridharan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Duration mismatch compensation using four-covariance model and deep neural network for speaker verification&lt;br&gt;&lt;small&gt;Pierre-Michel Bousquet; Mickael Rouvier&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Extended Variability Modeling and Unsupervised Adaptation for PLDA Speaker Recognition&lt;br&gt;&lt;small&gt;Alan McCree; Greg Sell; Daniel Garcia-Romero&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Improving the Effectiveness of Speaker Verification Domain Adaptation With Inadequate In-Domain Data&lt;br&gt;&lt;small&gt;Jonas Borgstrom; Elliot Singer; Douglas Reynolds; Seyed Omid Sadjadi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - I-Vector DNN Scoring and Calibration for Noise Robust Speaker Verification&lt;br&gt;&lt;small&gt;Zhili Tan; Manwai Mak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Speaker Verification via Estimating Total Variability Space Using Probabilistic Partial Least Squares&lt;br&gt;&lt;small&gt;Chen Chen; Jiqing Han; Yilin 
Pan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speaker Characterization and Recognition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4290\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Neural Network Acoustic Models for ASR 1\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mathew Magimai-Doss; Jan \u010cernock\u00fd&lt;br&gt;&lt;br&gt;10.00-10.20 - A Comparison of Sequence-to-Sequence Models for Speech Recognition&lt;br&gt;&lt;small&gt;Rohit Prabhavalkar; Kanishka Rao; Tara Sainath; Bo Li; Leif Johnson; Navdeep Jaitly&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - CTC in the Context of Generalized Full-Sum HMM Training&lt;br&gt;&lt;small&gt;Albert Zeyer; Eugen Beck; Ralf Schl\u00fcter; Hermann Ney&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Advances in Joint CTC-Attention based End-to-End Speech Recognition with a Deep CNN Encoder and RNN-LM&lt;br&gt;&lt;small&gt;Takaaki Hori; Shinji Watanabe; Yu Zhang; William Chan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Multitask Learning 
with CTC and Segmental CRF for Speech Recognition&lt;br&gt;&lt;small&gt;Liang Lu; Lingpeng Kong; Chris Dyer; Noah Smith&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Direct Acoustics-to-Word Models for English Conversational Speech Recognition&lt;br&gt;&lt;small&gt;Kartik Audhkhasi; Bhuvana Ramabhadran; George Saon; Michael Picheny; David Nahamoo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Reducing the Computational Complexity of Two-Dimensional LSTMs&lt;br&gt;&lt;small&gt;Bo Li; Tara Sainath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Neural Network Acoustic Models for ASR 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4294\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Phonation and Voice Quality\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Peter Birkholz; Kikuo Maekawa&lt;br&gt;&lt;br&gt;10.00-10.20 - Creak as a feature of lexical stress in Estonian&lt;br&gt;&lt;small&gt;K\u00e4tlin Aare; P\u00e4rtel Lippus; Juraj 
\u0160imko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Cross-speaker Variation in Voice Source Correlates of Focus and Deaccentuation&lt;br&gt;&lt;small&gt;Irena Yanushevskaya; Ailbhe N\u00ed Chasaide; Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Acoustic Characterization of Word-final Glottal Stops in Mizo and Assam Sora&lt;br&gt;&lt;small&gt;Sishir Kalita; Wendy Lalhminghlui; Luke Horo; Priyankoo Sarmah; S R Mahadeva Prasanna; Samarendra Dandapat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Iterative Optimal Preemphasis for Improved Glottal-Flow Estimation by Iterative Adaptive Inverse Filtering&lt;br&gt;&lt;small&gt;Parham Mokhtari; Hiroshi Ando&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Automatic Measurement of Pre-aspiration&lt;br&gt;&lt;small&gt;Yaniv Sheena; Michaela Hejna; Yossi Adi; Joseph Keshet&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Acoustic and electroglottographic study of breathy and modal vowels as produced by heritage and native Gujarati speakers&lt;br&gt;&lt;small&gt;Kiranpreet Nara&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Phonation and Voice Quality<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5239\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Lunch\"\n            data-abs-nbr=\"\"\n            data-ystart=\"12\"\n            data-yend=\"13.5\"\n            data-time=\"12:00-13:30\"\n            
data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Lunch<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  12:00-13:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4299\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Source Separation and Auditory Scene Analysis\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mahadeva Prasanna; G\u00e9za N\u00e9meth&lt;br&gt;&lt;br&gt;13.30-13.50 - A Maximum Likelihood Approach to Deep Neural Network Based Nonlinear Spectral Mapping for Single-Channel Speech Separation&lt;br&gt;&lt;small&gt;Yannan Wang; Jun Du; Lirong Dai; Chin-Hui Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Deep clustering-based beamforming for separation with unknown number of sources&lt;br&gt;&lt;small&gt;Takuya Higuchi; Keisuke Kinoshita; Marc Delcroix; Katerina Zmolikova; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - 
Time-frequency masking for blind source separation with preserved spatial cues&lt;br&gt;&lt;small&gt;Shadi Pirhosseinloo; Kostas Kokkinakis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Variational Recurrent Neural Networks for Speech Separation&lt;br&gt;&lt;small&gt;Jen-Tzung Chien; Kuan-Ting Kuo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Detecting overlapped speech on short timeframes using deep learning&lt;br&gt;&lt;small&gt;Valentin Andrei; Horia Cucu; Corneliu Burileanu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Ideal ratio mask estimation using deep neural networks for monaural speech segregation in noisy reverberant conditions&lt;br&gt;&lt;small&gt;Xu Li; Junfeng Li; Yonghong Yan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Speech and Audio Signals\"\n            data-category-ids=\"1062\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Source Separation and Auditory Scene Analysis<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4310\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Acoustic Models for ASR 1\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"16\"\n            data-room-id=\"1061\"\n            data-room-name=\"Poster 1\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Michiel 
Bacchiani&lt;br&gt;&lt;br&gt;13.30-15.30 - A Comparative Evaluation of GMM-Free State Tying Methods for ASR&lt;br&gt;&lt;small&gt;Tam\u00e1s Gr\u00f3sz; G\u00e1bor Gosztolya; L\u00e1szl\u00f3 T\u00f3th&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An exploration of dropout with LSTMs&lt;br&gt;&lt;small&gt;Gaofeng Cheng; Vijayaditya Peddinti; Dan Povey; Vimal Manohar; Sanjeev Khudanpur; Yonghong Yan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Convolutional Recurrent Neural Networks for Small-Footprint Keyword Spotting&lt;br&gt;&lt;small&gt;Sercan Arik; Markus Kliegl; Rewon Child; Joel Hestness; Andrew Gibiansky; Chris Fougner; Ryan Prenger; Adam Coates&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Deep Activation Mixture Model for Speech Recognition&lt;br&gt;&lt;small&gt;Chunyang Wu; Mark Gales&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Ensembles of Multi-scale VGG Acoustic Models&lt;br&gt;&lt;small&gt;Michael Heck; Masayuki Suzuki; Takashi Fukuda; Gakuto Kurata; Satoshi Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Forward-backward Convolutional LSTM for Acoustic Modeling&lt;br&gt;&lt;small&gt;Shigeki Karita; Atsunori Ogawa; Marc Delcroix; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Residual LSTM: Design of a Deep Recurrent Architecture for Distant Speech Recognition&lt;br&gt;&lt;small&gt;Jaeyoung Kim; Mostafa El-Khamy; Jungwon Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Training Context-Dependent DNN Acoustic Models using Probabilistic Sampling&lt;br&gt;&lt;small&gt;Tam\u00e1s Gr\u00f3sz; G\u00e1bor Gosztolya; L\u00e1szl\u00f3 T\u00f3th&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - UNFOLDED DEEP RECURRENT CONVOLUTIONAL NEURAL NETWORK WITH JUMP AHEAD CONNECTIONS FOR ACOUSTIC MODELING&lt;br&gt;&lt;small&gt;Tien Dung Tran; Marc Delcroix; Shigeki Karita; Michael Hentschel; Atsunori Ogawa; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, 
Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Acoustic Models for ASR 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4319\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 3\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"11\"\n            data-room-id=\"1070\"\n            data-room-name=\"E306\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"13.30-15.30 - Applications of the BBN Sage Speech Processing Platform&lt;br&gt;&lt;small&gt;Ralf Meermeier; Sean Colbath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Signal Processing Approach for Speaker Separation using SFF Analysis&lt;br&gt;&lt;small&gt;Nivedita Chennupati; Narayana Murthy BHVS; Bayya Yegnanarayana&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Bob Speaks Kaldi&lt;br&gt;&lt;small&gt;Milos Cernak; Alain Komaty; Amir Mohammadi; Andre Anjos; Sebastien Marcel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - MetaLab: A repository for meta-analyses on language development, and more&lt;br&gt;&lt;small&gt;Sho Tsuji; Christina Bergmann; Molly Lewis; Mika Braginsky; Page Piccinini; Michael C. 
Frank; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Real time pitch shifting with formant structure preservation using the phase vocoder&lt;br&gt;&lt;small&gt;Micha\u0142 Lenarczyk&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Speech Recognition and Understanding on Hardware-Accelerated DSP&lt;br&gt;&lt;small&gt;Georg Stemmer; Munir Georges; Joachim Hofer; Piotr Rozen; Josef Bauer; Jakub Nowicki; Tobias Bocklet; Hannah Colett; Ohad Falik; Michael Deisher; Sylvia Downing&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 3<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4312\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Dialog Modeling\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"18\"\n            data-room-id=\"1069\"\n            data-room-name=\"Poster 3\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kristiina Jokinen&lt;br&gt;&lt;br&gt;13.30-15.30 - Analysis of the Relationship between Prosodic Features of 
Fillers and Its Forms or Occurrence Positions&lt;br&gt;&lt;small&gt;Shizuka Nakamura; Ryosuke Nakanishi; Katsuya Takanashi; Tatsuya Kawahara&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Turn-taking Estimation Model based on Joint Embedding of Lexical and Prosodic Contents&lt;br&gt;&lt;small&gt;Chaoran Liu; Carlos Ishi; Hiroshi Ishiguro&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Cross-Subject Continuous Emotion Recognition using Speech and Body Motion in Dyadic Interactions&lt;br&gt;&lt;small&gt;Syeda Narjis Fatima; Engin Erzin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Domain-independent User Satisfaction Reward Estimation for Dialogue Policy Learning&lt;br&gt;&lt;small&gt;Stefan Ultes; Pawe\u0142 Budzianowski; I\u00f1igo Casanueva; Nikola Mrk\u0161i\u0107; Lina M. Rojas Barahona; Pei-Hao Su; Tsung-Hsien Wen; Milica Gasic; Steve Young&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - End-of-Utterance Prediction by Prosodic Features and Phrase-Dependency Structure in Spontaneous Japanese Speech&lt;br&gt;&lt;small&gt;Yuichi Ishimoto; Takehiro Teraoka; Mika Enomoto&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Entrainment in Multi-Party Spoken Dialogues at Multiple Linguistic Levels&lt;br&gt;&lt;small&gt;Zahra Rahimi; Anish Kumar; Diane Litman; Susannah Paletz; Mingzhi Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Hierarchical LSTMs with Joint Learning for Estimating Customer Satisfaction from Contact Center Calls&lt;br&gt;&lt;small&gt;Atsushi Ando; Ryo Masumura; Hosana Kamiyama; Satoshi Kobashikawa; Yushi Aono&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Improving prediction of speech activity using multi-participant respiratory state&lt;br&gt;&lt;small&gt;Marcin Wlodarczak; Kornel Laskowski; Mattias Heldner; K\u00e4tlin Aare&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Issues in Human and Automated Scoring of Fluency, Pronunciation and Intonation During Human--Machine Spoken Dialog Interactions&lt;br&gt;&lt;small&gt;Vikram Ramanarayanan; 
Patrick Lange; Keelan Evanini; Hillary Molloy; David Suendermann-Oeft&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Measuring Synchrony in Task-based Dialogues&lt;br&gt;&lt;small&gt;Justine Reverdy; Carl Vogel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Online End-of-Turn Detection from Speech based on Stacked Time-Asynchronous Sequential Networks&lt;br&gt;&lt;small&gt;Ryo Masumura; Taichi Asami; Hirokazu Masataki; Ryo Ishii; Ryuichiro Higashinaka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Sequence to Sequence Modeling for User Simulation in Dialog Systems&lt;br&gt;&lt;small&gt;Paul Crook; Alex Marin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Social Signal Detection in Spontaneous Dialogue Using Bidirectional LSTM-CTC&lt;br&gt;&lt;small&gt;Hirofumi Inaguma; Koji Inoue; Masato Mimura; Tatsuya Kawahara&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Towards Deep End-of-Turn Prediction for Situated Spoken Dialogue Systems&lt;br&gt;&lt;small&gt;Angelika Maier; Julian Hough; David Schlangen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Turn-Taking Offsets and Dialogue Context&lt;br&gt;&lt;small&gt;Peter Heeman; Rebecca Lunsford&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n            data-category-ids=\"1051\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Dialog Modeling<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4297\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Voice Conversion 1\"\n            
data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hema Murthy; S R M Prasanna&lt;br&gt;&lt;br&gt;13.30-13.50 - Voice Conversion Using Sequence-to-Sequence Learning of Context Posterior Probabilities&lt;br&gt;&lt;small&gt;Hiroyuki Miyoshi; Yuki Saito; Shinnosuke Takamichi; Hiroshi Saruwatari&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Learning Latent Representations for Speech Generation and Transformation&lt;br&gt;&lt;small&gt;Wei-Ning Hsu; Yu Zhang; James Glass&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Parallel-data-free Many-to-many Voice Conversion based on DNN Integrated with Eigenspace Using a Non-parallel Speech Corpus&lt;br&gt;&lt;small&gt;Tetsuya Hashimoto; Hidetsugu Uchida; Daisuke Saito; Nobuaki Minematsu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Sequence-to-Sequence Voice Conversion with Similarity Metric Learned Using Generative Adversarial Networks&lt;br&gt;&lt;small&gt;Takuhiro Kaneko; Hirokazu Kameoka; Kaoru Hiramatsu; Kunio Kashino&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - A mouth opening effect based on pole modification for expressive singing voice transformation&lt;br&gt;&lt;small&gt;Luc Ardaillon; Axel Roebel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Siamese Autoencoders for Speech Style Extraction and Switching Applied to Voice Identification and Conversion&lt;br&gt;&lt;small&gt;Seyed Hamidreza Mohammadi; Alexander Kain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Synthesis and Spoken Language Generation\"\n            data-category-ids=\"1059\"\n            data-span-all=\"\"\n                      >\n\n            <div 
class=\"event_content\">\n              <div class=\"title\">\n                                <span>Voice Conversion 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4320\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 4\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"12\"\n            data-room-id=\"1071\"\n            data-room-name=\"E397\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"13.30-15.30 - Combining Gaussian mixture models and segmental feature models for speaker recognition&lt;br&gt;&lt;small&gt;Milana Milo\u0161evi\u0107; Ulrike Glavitsch&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Did you laugh enough today? 
- Deep Neural Networks for Mobile and Wearable Laughter Trackers&lt;br&gt;&lt;small&gt;Gerhard Hagerer; Nicholas Cummins; Florian Eyben; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Evolving recurrent neural networks that process and classify raw audio in a streaming fashion&lt;br&gt;&lt;small&gt;Adrien DANIEL&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Low-Frequency Ultrasonic Communication for Speech Broadcasting in Public Transportation&lt;br&gt;&lt;small&gt;Kwang Myung Jeon; Nam Kyun Kim; Chan Woong Kwak; Jung Min Moon; Hong Kook Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Reading validation for pronunciation evaluation in the Digitala project&lt;br&gt;&lt;small&gt;Aku Rouhe; Reima Karhila; Peter Smit; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Real-time Speech Enhancement with GCC-NMF: Demonstration on the Raspberry Pi and NVIDIA Jetson&lt;br&gt;&lt;small&gt;Sean Wood; Jean Rouat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 4<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4322\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speaker Recognition for the Next Decade\"\n            data-abs-nbr=\"\"\n            
data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"2\"\n            data-room-id=\"1072\"\n            data-room-name=\"B3\"\n            data-day=\"2\"\n            data-abs-path=\"\/abs\/4322.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Special event\"\n            data-category-ids=\"1064\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speaker Recognition for the Next Decade<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4296\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"WaveNet and Novel Paradigms\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Peter Cahill; Rob Clark&lt;br&gt;&lt;br&gt;13.30-13.50 - Speaker-dependent WaveNet vocoder&lt;br&gt;&lt;small&gt;Akira Tamamori; Tomoki Hayashi; Kazuhiro Kobayashi; Kazuya Takeda; Tomoki Toda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Waveform Modeling Using Stacked Dilated Convolutional Neural Networks for Speech Bandwidth Extension&lt;br&gt;&lt;small&gt;Yu Gu; Zhen-Hua Ling&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Direct 
modeling of frequency spectra and waveform generation based on phase recovery for DNN-based speech synthesis&lt;br&gt;&lt;small&gt;Shinji Takaki; Hirokazu Kameoka; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - A Hierarchical Encoder-Decoder Model for Statistical parametric speech synthesis&lt;br&gt;&lt;small&gt;Srikanth Ronanki; Oliver Watts; Simon King&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Statistical voice conversion with WaveNet-based waveform generation&lt;br&gt;&lt;small&gt;Kazuhiro Kobayashi; Tomoki Hayashi; Akira Tamamori; Tomoki Toda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Google\u2019s Next-Generation Real-Time Unit-Selection Synthesizer using Sequence-To-Sequence LSTM-based Autoencoders&lt;br&gt;&lt;small&gt;Vincent Wan; Yannis Agiomyrgiannakis; Hanna Silen; Jakub Vit&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Synthesis and Spoken Language Generation\"\n            data-category-ids=\"1059\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>WaveNet and Novel Paradigms<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4325\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Incremental Processing and Responsive Behaviour\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"14\"\n            data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n   
         data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Timo Baumann; Ingmar Steiner&lt;br&gt;&lt;br&gt;13.30-13.45 - Introduction&lt;br&gt;&lt;small&gt;Timo Baumann; Thomas Hueber; David Schlangen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.45-14.00 - Adjusting the Frame: Biphasic Performative Control of Speech Rhythm&lt;br&gt;&lt;small&gt;Samuel Delalez; Christophe d'Alessandro&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.00-14.15 - Attentional factors in listeners' uptake of gesture cues during speech processing&lt;br&gt;&lt;small&gt;Raheleh Saryazdi; Craig Chambers&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.15-14.30 - Motion analysis in vocalized surprise expressions&lt;br&gt;&lt;small&gt;Carlos Ishi; Takashi Minato; Hiroshi Ishiguro&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.45 - Enhancing Backchannel Prediction Using Word Embeddings&lt;br&gt;&lt;small&gt;Robin R\u00fcde; Markus M\u00fcller; Sebastian St\u00fcker; Alex Waibel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.45-15.00 - A Computational Model for Phonetically Responsive Spoken Dialogue Systems&lt;br&gt;&lt;small&gt;Eran Raveh; Ingmar Steiner; Bernd M\u00f6bius&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.00-15.15 - Incremental Dialogue Act Recognition: token- vs chunk-based classification&lt;br&gt;&lt;small&gt;Eustace Ebhotemhen; Volha Petukhova; Dietrich Klakow&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.15-15.30 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n            data-category-ids=\"1051\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Incremental Processing and Responsive Behaviour<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n               
 <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4301\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Emotion Modeling\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Koichi Shinoda; Anton  Batliner&lt;br&gt;&lt;br&gt;13.30-13.50 - Speech Emotion Recognition with Emotion-Pair based Framework Considering Emotion Distribution Information in Dimensional Emotion Space&lt;br&gt;&lt;small&gt;Xi Ma; Zhiyong Wu; Jia Jia; Mingxing Xu; Helen Meng; Lianhong Cai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Adversarial Auto-encoders for Speech Based Emotion Recognition&lt;br&gt;&lt;small&gt;Saurabh Sahu; Rahul Gupta; Ganesh Sivaraman; Wael Abdalmageed; Carol Espy-Wilson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - An Investigation of Emotion Prediction Uncertainty Using Gaussian Mixture Regression&lt;br&gt;&lt;small&gt;Ting Dang; Vidhyasaharan Sethu; Julien Epps; Eliathamby Ambikairajah&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Capturing Long-term Temporal Dependencies with Convolutional Networks for Continuous Emotion Recognition&lt;br&gt;&lt;small&gt;Soheil Khorram; Zakaria Aldeneh; Dimitrios Dimitriadis; Melvin McInnis; Emily Mower Provost&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Voice-to-affect mapping: inferences on language voice baseline settings&lt;br&gt;&lt;small&gt;Ailbhe N\u00ed Chasaide; Irena Yanushevskaya; Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Attentive Convolutional 
Neural Network based Speech Emotion Recognition: A Study on the Impact of Input Features, Signal Length, and Acted Speech&lt;br&gt;&lt;small&gt;Michael Neumann; Ngoc Thang Vu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Emotion Modeling<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4300\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Prosody: Tone and Intonation\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mariapaola D'Imperio; Oliver Niebuhr&lt;br&gt;&lt;br&gt;13.30-13.50 - The Vocative Chant and Beyond: German Calling Melodies under Routine and Urgent Contexts&lt;br&gt;&lt;small&gt;Sergio Quiroz; Marzena Zygis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Comparing languages using hierarchical prosodic analysis&lt;br&gt;&lt;small&gt;Juraj \u0160imko; Antti Suni; Katri Hiovain; Martti Vainio&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Intonation Facilitates Prediction of Focus even in the Presence of Lexical Tones&lt;br&gt;&lt;small&gt;Martin Ho Kwan Ip; Anne 
Cutler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Mind the peak: When museum is temporarily understood as musical in Australian English&lt;br&gt;&lt;small&gt;Katharina Zahner; Heather Kember; Bettina Braun&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Pashto intonation patterns&lt;br&gt;&lt;small&gt;Luca Rognoni; Judith Bishop; Miriam Corris&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - A new model of final lowering in spontaneous monologue&lt;br&gt;&lt;small&gt;Kikuo Maekawa&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Prosody: Tone and Intonation<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4311\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Acoustic Models for ASR 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"17\"\n            data-room-id=\"1067\"\n            data-room-name=\"Poster 2\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Karen Livescu&lt;br&gt;&lt;br&gt;13.30-15.30 - Acoustic feature learning with deep variational canonical correlation analysis&lt;br&gt;&lt;small&gt;Qingming Tang; Weiran Wang; Karen Livescu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An Efficient Phone N-gram 
Forward-backward Computation Using Dense Matrix Multiplication&lt;br&gt;&lt;small&gt;Khe Chai Sim; Arun Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Backstitch: Counteracting Finite-sample Bias via Negative Steps&lt;br&gt;&lt;small&gt;Yiming Wang; Vijayaditya Peddinti; Hainan Xu; Xiaohui Zhang; Dan Povey; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - End-to-End Training of Acoustic Models for Large Vocabulary Continuous Speech Recognition with TensorFlow&lt;br&gt;&lt;small&gt;Ehsan Variani; Tom Bagby; Erik McDermott; Michiel Bacchiani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Node pruning based on Entropy of Weights and Node Activity for Small-footprint Acoustic Model based on Deep Neural Networks&lt;br&gt;&lt;small&gt;Ryu Takeda; Kazuhiro Nakadai; Kazunori Komatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Parallel Neural Network Features for Improved Tandem Acoustic Modeling&lt;br&gt;&lt;small&gt;Zolt\u00e1n T\u00fcske; Wilfried Michel; Ralf Schl\u00fcter; Hermann Ney&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Acoustic Models for ASR 2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4298\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Models of Speech Perception\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n      
      data-time=\"13:30-15:30\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Chris Davis; Frank Zimmerer&lt;br&gt;&lt;br&gt;13.30-13.50 - A Comparison of Sentence-level Speech Intelligibility Metrics&lt;br&gt;&lt;small&gt;Alexander Kain; Max Del Giudice; Kris Tjaden&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - An auditory model of speaker size perception for voiced speech sounds&lt;br&gt;&lt;small&gt;Toshio Irino; Eri Takimoto; Toshie Matsui; Roy Patterson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - The recognition of compounds: a computational account&lt;br&gt;&lt;small&gt;Louis ten Bosch; Lou Boves; Mirjam Ernestus&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Humans do not maximize the probability of correct decision when recognizing DANTALE words in noise&lt;br&gt;&lt;small&gt;Mohsen Zareian Jahromi; Jan \u00d8stergaard; Jesper Jensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Single-ended prediction of listening effort based on automatic speech recognition&lt;br&gt;&lt;small&gt;Rainer Huber; Constantin Spille; Bernd T. 
Meyer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Modeling categorical perception with the receptive fields of auditory neurons&lt;br&gt;&lt;small&gt;Chris Neufeld&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n            data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Models of Speech Perception<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5258\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Refreshments\"\n            data-abs-nbr=\"\"\n            data-ystart=\"15.5\"\n            data-yend=\"16\"\n            data-time=\"15:30-16:00\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Refreshments<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  15:30-16:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div 
class=\"event \"\n            data-id=\"4314\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Voice, Speech and Hearing Disorders\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"17\"\n            data-room-id=\"1067\"\n            data-room-name=\"Poster 2\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Timothy Bunnell&lt;br&gt;&lt;br&gt;16.00-18.00 - Apkinson \u2013 A mobile monitoring solution for Parkinson\u2019s disease&lt;br&gt;&lt;small&gt;Philipp Klumpp; Thomas Janu; Tom\u00e1s Arias-Vergara; Juan Camilo V\u00e1squez Correa; Juan Rafael Orozco-Arroyave; Elmar Noeth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Automatic Prediction of Speech Evaluation Metrics for Dysarthric Speech&lt;br&gt;&lt;small&gt;Imed Laaridh; Waad Ben Kheder; Corinne Fredouille; Christine Meunier&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Cepstral and entropy analyses in vowels excerpted from continuous speech of dysphonic and control speakers&lt;br&gt;&lt;small&gt;Antonella Castellana; Andreas Selamtzis; Giampiero Salvi; Alessio Carullo; Arianna Astolfi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Classification of bulbar ALS from kinematic features of the jaw and lips: Towards computer-mediated assessment&lt;br&gt;&lt;small&gt;Andrea Bandini; Jordan Green; Lorne Zinman; Yana Yunusova&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Deep Autoencoder based Speech Features for Improved Dysarthric Speech Recognition&lt;br&gt;&lt;small&gt;Bhavik Vachhani; Chitralekha Bhat; Biswajit Das; Sunil Kumar Kopparapu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Dysprosody differentiate between Parkinson\u2019s disease, progressive supranuclear palsy, and multiple system atrophy&lt;br&gt;&lt;small&gt;Jan 
Hlavni\u010dka; Tereza Tykalov\u00e1; Roman \u010cmejla; Ji\u0159\u00ed Klemp\u00ed\u0159; Ev\u017een R\u016f\u017ei\u010dka; Jan Rusz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Hypernasality Severity Analysis in Cleft Lip and Palate Speech Using Vowel Space Area.&lt;br&gt;&lt;small&gt;Nikitha K; Sishir Kalita; CM Vikram; M. Pushpavathi; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Interpretable Objective Assessment of Dysarthric Speech based on Deep Neural Networks&lt;br&gt;&lt;small&gt;Ming Tu; Visar Berisha; Julie Liss&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Prediction of Speech Delay from Acoustic Measurements&lt;br&gt;&lt;small&gt;Jason Lilley; Madhavi Ratnagiri; H Timothy Bunnell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Production of sustained vowels and categorical perception of tones in Mandarin among cochlear-implanted children&lt;br&gt;&lt;small&gt;Wentao Gu; Jiao Yin; James Mahshie&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The Frequency Range of \u201cThe Ling Six Sounds\u201d in Standard Chinese&lt;br&gt;&lt;small&gt;Aijun Li; Hua Zhang; Wen Sun&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Zero Frequency Filter Based Analysis of Voice Disorders&lt;br&gt;&lt;small&gt;Nagaraj Adiga; Vikram C M; Keerthi Pullela; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n            data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Voice, Speech and Hearing Disorders<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          
<div class=\"event \"\n            data-id=\"4302\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Neural Network Acoustic Models for ASR 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mark Gales; Tara Sainath&lt;br&gt;&lt;br&gt;16.00-16.20 - Recurrent Neural Aligner: An Encoder-Decoder Neural Network Model for Sequence to Sequence Mapping&lt;br&gt;&lt;small&gt;Hasim Sak; Matt Shannon; Kanishka Rao; Francoise Beaufays&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Highway-LSTM and Recurrent Highway Networks for Speech Recognition&lt;br&gt;&lt;small&gt;Golan Pundak; Tara Sainath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Improving speech recognition by revising gated recurrent units&lt;br&gt;&lt;small&gt;Mirco Ravanelli; Philemon Brakel; Maurizio Omologo; Yoshua Bengio&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Stochastic Recurrent Neural Network for Speech Recognition&lt;br&gt;&lt;small&gt;Jen-Tzung Chien; Chen Shen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Frame and Segment Level Recurrent Neural Networks for Phone Classification&lt;br&gt;&lt;small&gt;Martin Ratajczak; Sebastian Tschiatschek; Franz Pernkopf&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Deep Learning-based Telephony Speech Recognition in the Wild&lt;br&gt;&lt;small&gt;Kyu Han; Seongjun Hahm; Byung-Hak Kim; Jungsuk Kim; Ian Lane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n         
   <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Neural Network Acoustic Models for ASR 2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4313\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"L1 and L2 Acquisition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"16\"\n            data-room-id=\"1061\"\n            data-room-name=\"Poster 1\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Aoju Chen&lt;br&gt;&lt;br&gt;16.00-18.00 - A comparison of Danish listeners\u2019 processing cost in judging the truth value of Norwegian, Swedish, and English sentences&lt;br&gt;&lt;small&gt;Ocke-Schwen Bohn; Trine Askj\u00e6r-J\u00f8rgensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - A data-driven approach for perceptually validated acoustic features for children's sibilant fricative productions&lt;br&gt;&lt;small&gt;Patrick Reidy; Mary Beckman; Jan Edwards; Benjamin Munson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - An Automatically Aligned Corpus of Child-directed Speech&lt;br&gt;&lt;small&gt;Micha Elsner; Kiwako Ito&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Articulation rate in Swedish child-directed speech increases as a function of the age of the child even when surprisal is controlled for&lt;br&gt;&lt;small&gt;Johan Sjons; Thomas H\u00f6rberg; Robert \u00d6stling; Johannes Bjerva&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - 
Changes in early L2 cue-weighting of non-native speech: Evidence from learners of Mandarin Chinese&lt;br&gt;&lt;small&gt;Seth Wiener&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Directing Attention during Perceptual Training: A Preliminary Study of Phonetic Learning in Southern Min by Mandarin Speakers&lt;br&gt;&lt;small&gt;Ying Chen; Eric Pederson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Lexical adaptation to a novel accent in German: A comparison between German, Swedish, and Finnish listeners&lt;br&gt;&lt;small&gt;Adriana Hanulikova; Jenny Ekstr\u00f6m&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Measuring Encoding Efficiency in Swedish and English Language Learner Speech Production&lt;br&gt;&lt;small&gt;Gintare Grigonyte; Gerold Schneider&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Mechanisms of Tone Sandhi Rule Application by Non-native Speakers&lt;br&gt;&lt;small&gt;Si Chen; YUNJUAN HE; Chun Wah Yuen; Bei Li; Yike Yang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - MMN responses in adults after exposure to bimodal and unimodal frequency distributions of rotated speech&lt;br&gt;&lt;small&gt;Ellen Marklund; El\u00edsabet Eir Cortes; Johan Sjons&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - On the role of temporal variability in the acquisition of the German vowel length contrast&lt;br&gt;&lt;small&gt;Felicitas Kleber&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Prosody analysis of L2 English for naturalness evaluation through speech modification&lt;br&gt;&lt;small&gt;Dean Luo; Ruxin Luo; Lixin Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Qualitative differences in L3 learners' neurophysiological response to L1 versus L2 transfer&lt;br&gt;&lt;small&gt;Alejandra Keidel Fern\u00e1ndez; Thomas H\u00f6rberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Quality Assessment of ESL Learner\u2019s Sentence Prosody with TTS Synthesized Voice as Reference&lt;br&gt;&lt;small&gt;Yujia Xiao; Frank 
Soong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The relationship between the perception and production of non-native tones&lt;br&gt;&lt;small&gt;Kaile Zhang; Gang Peng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n            data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>L1 and L2 Acquisition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4315\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Source Separation and Voice Activity Detection\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"18\"\n            data-room-id=\"1069\"\n            data-room-name=\"Poster 3\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tom B\u00e4ckstr\u00f6m&lt;br&gt;&lt;br&gt;16.00-18.00 - A Contrast Function and Algorithm for Blind Separation of Audio Signals&lt;br&gt;&lt;small&gt;Wei Gao; Roberto Togneri; Victor Sreeram&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - A Mask Estimation Method Integrating Data Field Model for Speech Enhancement&lt;br&gt;&lt;small&gt;Xianyun Wang; Changchun Bao; Feng Bao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Audio Content based Geotagging in Multimedia&lt;br&gt;&lt;small&gt;Anurag Kumar; Benjamin Elizalde; Bhiksha 
Raj&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Bimodal Recurrent Neural Network for Audiovisual Voice Activity Detection&lt;br&gt;&lt;small&gt;Fei Tao; Carlos Busso&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Domain-Specific Utterance End-Point Detection for Speech Recognition&lt;br&gt;&lt;small&gt;Roland Maas; Ariya Rastrow; Kyle Goehner; Gautam Tiwari; Shaun Joseph; Bjorn Hoffmeister&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Excitation Source Features for Improving the Detection of Vowel Onset and Offset Points in a Speech Sequence&lt;br&gt;&lt;small&gt;Gayadhar Pradhan; Avinash Kumar; Syed Shahnawazuddin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Improved end-of-query detection for streaming speech recognition&lt;br&gt;&lt;small&gt;Matt Shannon; Gabor Simko; Shuo-Yiin Chang; Carolina Parada&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Improving Source Separation via Multi-Speaker Representations&lt;br&gt;&lt;small&gt;Jeroen Zegers; Hugo Van hamme&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Multiple Sound Source Counting and Localization Based on Spatial Principal Eigenvector&lt;br&gt;&lt;small&gt;Bing Yang; Hong Liu; Cheng Pang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speaker Direction-of-Arrival Estimation Based On Frequency-Independent Beampattern&lt;br&gt;&lt;small&gt;Feng Guo; Yuhang Cao; Zheng Liu; Jiaen Liang; Baoqing Li; Xiaobing Yuan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speech detection and enhancement using single microphone for distant speech applications in reverberant environments&lt;br&gt;&lt;small&gt;Vinay Kothapally; John H.L. 
Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Subband selection for binaural speech source localization&lt;br&gt;&lt;small&gt;Karthik Girija Ramesan; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Time Delay Histogram Based Speech Source Separation Using a Planar Array&lt;br&gt;&lt;small&gt;Zhaoqiong Huang; Zhanzhong Cao; Dongwen Ying; Jielin Pan; Yonghong Yan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Unmixing Convolutive Mixtures by Exploiting Amplitude Co-modulation: Methods and Evaluation on Mandarin Speech Recordings&lt;br&gt;&lt;small&gt;Bo-Rui Chen; Huang-Yi Lee; Yi-Wen Liu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Using Approximated Auditory Roughness as a Pre-filtering Feature for Human Screaming and Affective Speech AED&lt;br&gt;&lt;small&gt;Di He; Zuofu Cheng; Mark Hasegawa-Johnson; Deming Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Weighted Spatial Covariance Matrix Estimation for MUSIC based TDOA Estimation of Speech Source&lt;br&gt;&lt;small&gt;Chenglin Xu; Xiong Xiao; Sining Sun; Wei Rao; Eng Siong Chng; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Speech and Audio Signals\"\n            data-category-ids=\"1062\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Source Separation and Voice Activity Detection<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4326\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Acoustic Manifestations of Social Characteristics\"\n            
data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"14\"\n            data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Stefanie Jannedy; Melanie Weirich&lt;br&gt;&lt;br&gt;16.00-16.05 - Introduction&lt;br&gt;&lt;small&gt;Stefanie Jannedy; Melanie Weirich&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.05-16.25 - Clear Speech - Mere Speech? How segmental and prosodic speech reduction shape the impression that speakers create on listeners&lt;br&gt;&lt;small&gt;Oliver Niebuhr&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.25-16.45 - To see or not to see: Interlocutor visibility and likeability influence convergence in intonation&lt;br&gt;&lt;small&gt;Katrin Schweitzer; Michael Walsh; Antje Schweitzer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.45-17.05 - Acoustic correlates of parental role and gender identity in the speech of expecting parents&lt;br&gt;&lt;small&gt;Melanie Weirich; Adrian Simpson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.05-17.25 - Effects of Talker Dialect, Gender &amp; Race on Accuracy of Bing Speech and YouTube Automatic Captions&lt;br&gt;&lt;small&gt;Rachael Tatman; Conner Kasten&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - A Semi-Supervised Learning Approach for Acoustic-Prosodic Personality Perception in Under-Resourced Domains&lt;br&gt;&lt;small&gt;Rub\u00e9n Solera-Ure\u00f1a; Helena Moniz; Fernando Batista; Vera Cabarrao; Anna Pompili; Ram\u00f3n Fern\u00e1ndez-Astudillo; Joana Campos; Ana Paiva; Isabel Trancoso&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - Perceptual and acoustic correlates of gender in the prepubertal voice&lt;br&gt;&lt;small&gt;Adrian Simpson; Riccarda Funk; Frederik Palmer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - Prosodic analysis of attention-drawing 
speech&lt;br&gt;&lt;small&gt;Carlos Ishi; Jun Arai; Norihiro Hagita&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - Relationships between speech timing and perceived hostility in a French corpus of political debates&lt;br&gt;&lt;small&gt;Charlotte Kouklia; Nicolas Audibert&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.25-18.00 - Towards Speaker Characterization: Identifying and Predicting Dimensions of Person Attribution&lt;br&gt;&lt;small&gt;Laura Fern\u00e1ndez Gallardo; Benjamin Weiss&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Acoustic Manifestations of Social Characteristics<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4316\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech-enhancement\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"19\"\n            data-room-id=\"1068\"\n            data-room-name=\"Poster 4\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Timo Gerkmann&lt;br&gt;&lt;br&gt;16.00-18.00 - A comparison of perceptually motivated loss functions for binary mask estimation in speech separation&lt;br&gt;&lt;small&gt;Danny Websdale; Ben Milner; Danny Websdale; Ben 
Milner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - A Fully Convolutional Network for Speech Enhancement&lt;br&gt;&lt;small&gt;Serim Park; Jinwon Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - A Post-filtering Approach Based on Locally Linear Embedding Difference Compensation for Speech Enhancement&lt;br&gt;&lt;small&gt;YICHIAO WU; Hsin-Te Hwang; Syu-Siang Wang; Chin-Cheng Hsu; Yu Tsao; Hsin-Min Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Binary mask estimation strategies for constrained imputation-based speech enhancement&lt;br&gt;&lt;small&gt;Ricard Marxer; Jon Barker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - BINAURAL REVERBERANT SPEECH SEPARATION BASED ON DEEP NEURAL NETWORKS&lt;br&gt;&lt;small&gt;Xueliang Zhang; DeLiang Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Conditional Generative Adversarial Networks for Speech Enhancement and Noise-Robust Speaker Verification&lt;br&gt;&lt;small&gt;Daniel Michelsanti; Zheng-Hua Tan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Improved Example-based Speech Enhancement by Using Deep Neural Network Acoustic Model for Noise Robust Example Search&lt;br&gt;&lt;small&gt;Atsunori Ogawa; Keisuke Kinoshita; Marc Delcroix; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - MixMax Approximation as a Super-Gaussian Log-Spectral Amplitude Estimator for Speech Enhancement&lt;br&gt;&lt;small&gt;Robert Rehr; Timo Gerkmann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Multi-target Ensemble Learning for Monaural Speech Separation&lt;br&gt;&lt;small&gt;Hui Zhang; Xueliang Zhang; Guanglai Gao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - On the influence of modifying magnitude and phase spectrum to enhance noisy speech signals&lt;br&gt;&lt;small&gt;Hans-Guenter Hirsch; Michael Gref&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - On the quality and intelligibility of noisy speech processed for near-end listening enhancement&lt;br&gt;&lt;small&gt;Catalin Zorila; Yannis 
Stylianou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - REAL-TIME MODULATION ENHANCEMENT OF TEMPORAL ENVELOPES FOR INCREASING SPEECH INTELLIGIBILITY&lt;br&gt;&lt;small&gt;Maria Koutsogiannaki; Holly Francois; Kihyun Choo; Eunmi Oh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speech Enhancement Using Bayesian Wavenet&lt;br&gt;&lt;small&gt;Kaizhi Qian; Yang Zhang; Shiyu Chang; Xuesong Yang; Dinei Florencio; Mark Hasegawa-Johnson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speech enhancement using non-negative spectrogram models with mel-generalized cepstral regularization&lt;br&gt;&lt;small&gt;Li Li; Hirokazu Kameoka; Tomoki Toda; Shoji Makino&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Subjective intelligibility of deep neural network-based speech enhancement&lt;br&gt;&lt;small&gt;Femke B. Gelderblom; Tron V. Tronstad; Erlend M. Viggen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Coding and Enhancement\"\n            data-category-ids=\"1060\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech-enhancement<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4306\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Prosody: Rhythm, Stress, Quantity and Phrasing\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            
data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Plinio Barbosa; P\u00e4rtel Lippus&lt;br&gt;&lt;br&gt;16.00-16.20 - Similar prosodic structure perceived differently in German and English&lt;br&gt;&lt;small&gt;Heather Kember; Ann-Kathrin Grohe; Katharina Zahner; Bettina Braun; Andrea Weber; Anne Cutler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Disambiguate or not? \u2013 The role of prosody in unambiguous and potentially ambiguous anaphora production in strictly Mandarin parallel structures&lt;br&gt;&lt;small&gt;Luying Hou; Bert Le Bruyn; Ren\u00e9 Kager&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Acoustic Properties of Canonical and Non-Canonical Stress in French, Turkish, Armenian and Brazilian Portuguese&lt;br&gt;&lt;small&gt;Angeliki Athanasopoulou; Irene Vogel; Hossep Dolatian&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Phonological complexity, segment rate and speech tempo perception&lt;br&gt;&lt;small&gt;Leendert Plug; Rachel Smith&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - On the Duration of Mandarin Tones&lt;br&gt;&lt;small&gt;Jing Yang; Yu Zhang; Aijun Li; Li Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - The formant dynamics of long close vowels in three varieties of Swedish&lt;br&gt;&lt;small&gt;Otto Ewald; Eva Liina Asu; Susanne Sch\u00f6tz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Prosody: Rhythm, Stress, Quantity and Phrasing<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n      
    <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4304\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speaker Recognition Evaluation\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kong Aik Lee; Rahim Saeidi&lt;br&gt;&lt;br&gt;16.00-16.20 - The I4U Mega Fusion and Collaboration for NIST Speaker Recognition Evaluation 2016&lt;br&gt;&lt;small&gt;Kong Aik Lee; Ville Hautamaki; Tomi Kinnunen; Anthony Larcher; Chunlei Zhang; Andreas Nautsch; Themos Stafylakis; Gang Liu; Mickael Rouvier; Wei Rao; Federico Alegre; Jianbo Ma; Manwai Mak; Achintya Sarkar; H\u00e9ctor Delgado; Rahim Saeidi; Hagai Aronowitz; Aleksandr Sizov; Hanwu Sun; Guangsen Wang; Trung Hieu Nguyen; Bin Ma; Ville Vestman; Md Sahidullah; Miikka Halonen; Anssi Kanervisto; Gael Le Lan; Fahimeh Bahmaninezhad; Sergey Isadskiy; Christian Rathgeb; Christoph Busch; Georgios Tzimiropoulos; Qi Qian; Zhibin Wang; Qingen Zhao; Tianzhou Wang; Hao Li; Jian Xue; Shenghuo Zhu; Rong Jin; Tuo Zhao; Pierre-Michel Bousquet; Moez Ajili; Waad Ben Kheder; Driss Matrouf; Zhi Hao Lim; Chenglin Xu; Haihua Xu; Xiong Xiao; Eng Siong Chng; Benoit Fauve; Vidhyasaharan Sethu; Kaavya Sriskandaraja; W. W. Lin; Zheng-Hua Tan; Dennis Alexander Lehmann Thomsen; Massimiliano Todisco; Nicholas Evans; Haizhou Li; John H.L. 
Hansen; Jean-Francois Bonastre&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - The MIT-LL, JHU and LRDE NIST 2016 Speaker Recognition Evaluation System&lt;br&gt;&lt;small&gt;Pedro Torres-Carrasquillo; Fred Richardson; Shahan Nercessian; Douglas Sturim; William Campbell; Youngjune Gwon; Swaroop Vattam; Najim Dehak; Harish Mallidi; Phani Sankar Nidadavolu; Ruizhi Li; Reda Dehak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Nuance - Politecnico di Torino\u2019s 2016 NIST Speaker Recognition Evaluation System&lt;br&gt;&lt;small&gt;Daniele Colibro; Claudio Vair; Emanuele Dalmasso; Kevin Farrell; Gennady Karvitsky; Sandro Cumani; Pietro Laface&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - UTD-CRSS Systems for 2016 NIST Speaker Recognition Evaluation&lt;br&gt;&lt;small&gt;Chunlei Zhang; Fahimeh Bahmaninezhad; Shivesh Ranjan; Chengzhu Yu; Navid Shokouhi; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Analysis and Description of ABC Submission to NIST SRE 2016&lt;br&gt;&lt;small&gt;Oldrich Plchot; Pavel Matejka; Anna Silnova; Ond\u0159ej Novotn\u00fd; Mireia Diez; Johan Rohdin; Ondrej Glembek; Niko Brummer; Albert Swart; Jes\u00fas Jorr\u00edn; Leibny Paola Garcia Perera; Luis Buera; Patrick Kenny; Md Jahangir Alam; Gautam Bhattacharya&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - The 2016 NIST Speaker Recognition Evaluation&lt;br&gt;&lt;small&gt;Seyed Omid Sadjadi; Timothee Kheyrkhah; Audrey Tong; Craig Greenberg; Douglas Reynolds; Elliot Singer; Lisa Mason; Jaime Hernandez-Cordero&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speaker Recognition Evaluation<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div 
class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4323\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Students Meet Experts\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"2\"\n            data-room-id=\"1072\"\n            data-room-name=\"B3\"\n            data-day=\"2\"\n            data-abs-path=\"\/abs\/4323.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Special event\"\n            data-category-ids=\"1064\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Students Meet Experts<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4303\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Stance, Credibility, and Deception\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Julien Epps; Carlos Busso&lt;br&gt;&lt;br&gt;16.00-16.20 - 
Inferring Stance from Prosody&lt;br&gt;&lt;small&gt;Nigel Ward; Jason Carlson; Olac Fuentes; Diego Castan; Elizabeth Shriberg; Andreas Tsiartas&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Exploring Dynamic Measures of Stance in Spoken Interaction&lt;br&gt;&lt;small&gt;Gina-Anne Levow; Richard A. Wright&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Opinion Dynamics Modeling for Movie Review Transcripts Classification with Hidden Conditional Random Fields&lt;br&gt;&lt;small&gt;Valentin Barriere; Chlo\u00e9 Clavel; Slim Essid&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - TRANSFER LEARNING BETWEEN CONCEPTS FOR HUMAN BEHAVIOR MODELING: AN APPLICATION TO SINCERITY AND DECEPTION PREDICTION&lt;br&gt;&lt;small&gt;Qinyi Luo; Rahul Gupta; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - The Sound of Deception - What Makes a Speaker Credible?&lt;br&gt;&lt;small&gt;Anne Schr\u00f6der; Simon Stone; Peter Birkholz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Hybrid Acoustic-Lexical Deep Learning Approach for Deception Detection&lt;br&gt;&lt;small&gt;Gideon Mendels; Sarah Ita Levitan; Kai-Zhan Lee; Julia Hirschberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Stance, Credibility, and Deception<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4307\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Recognition for Language 
Learning\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tatsuya Kawahara; Martin Russell&lt;br&gt;&lt;br&gt;16.00-16.20 - Bidirectional LSTM-RNN for Improving Automated Assessment of Non-native Children\u2019s Speech&lt;br&gt;&lt;small&gt;Yao Qian; Keelan Evanini; Xinhao Wang; Chong Min Lee; Matthew Mulholland&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Automatic Scoring of Shadowing Speech based on DNN Posteriors and their DTW&lt;br&gt;&lt;small&gt;Junwei Yue; Fumiya Shiozawa; Shohei Toyama; Yutaka Yamauchi; Kayoko Ito; Daisuke Saito; Nobuaki Minematsu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Off-Topic Spoken Response Detection Using Siamese Convolutional Neural Networks&lt;br&gt;&lt;small&gt;Chong Min Lee; Su-Youn Yoon; Xinhao Wang; Matthew Mulholland; Ikkyu Choi; Keelan Evanini&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Phonological Feature Based Mispronunciation Detection and Diagnosis using Multi-Task DNNs and Active Learning&lt;br&gt;&lt;small&gt;Vipul Arora; Aditi Lahiri; Henning Reetz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Detection of Mispronunciations and Disfluencies in Children Reading Aloud&lt;br&gt;&lt;small&gt;Jorge Proen\u00e7a; Carla Lopes; Michael Tjalve; Andreas Stolcke; Sara Candeias; Fernando Perdig\u00e3o&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Automatic assessment of non-native prosody by measuring distances on prosodic label sequences&lt;br&gt;&lt;small&gt;David Escudero-Mancebo; C\u00e9sar Gonz\u00e1lez-Ferreras; Eva Estebas-Vilaplana; Lourdes Aguilar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Technologies 
and Systems for New Applications\"\n            data-category-ids=\"1066\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Recognition for Language Learning<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4305\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Glottal Source Modeling\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Jo\u00e3o Cabral; Thomas Drugman&lt;br&gt;&lt;br&gt;16.00-16.20 - A new cosine series antialiasing function and its application to aliasing-free glottal source models for speech and singing synthesis&lt;br&gt;&lt;small&gt;Hideki Kawahara; Ken-Ichi Sakakibara; Hideki Banno; Masanori Morise; Tomoki Toda; Toshio Irino&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Speaking style conversion from normal to Lombard speech using a glottal vocoder and Bayesian GMMs&lt;br&gt;&lt;small&gt;Ana Ram\u00edrez L\u00f3pez; Shreyas Seshadri; Lauri Juvela; Okko R\u00e4s\u00e4nen; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Reducing mismatch in training of DNN-based glottal excitation models in a statistical parametric text-to-speech system&lt;br&gt;&lt;small&gt;Lauri Juvela; Bajibabu Bollepalli; Junichi Yamagishi; 
Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Semi Parametric Concatenative TTS with Instant Voice Modification Capabilities&lt;br&gt;&lt;small&gt;Alexander Sorin; Slava Shechtman; Asaf Rendel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Modeling laryngeal muscle activation noise for low-order physiological based speech synthesis&lt;br&gt;&lt;small&gt;Rodrigo Manriquez; Sean Peterson; Pavel Prado; Patricio Orio; Matias Za\u00f1artu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Direct Modelling of Magnitude and Phase Spectra for Statistical Parametric Speech Synthesis&lt;br&gt;&lt;small&gt;Felipe Espic; Cassia Valentini-Botinhao; Simon King&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Synthesis and Spoken Language Generation\"\n            data-category-ids=\"1059\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Glottal Source Modeling<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5251\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Student Reception\"\n            data-abs-nbr=\"\"\n            data-ystart=\"19\"\n            data-yend=\"24.5\"\n            data-time=\"19:00-00:30\"\n            data-room=\"20\"\n            data-room-id=\"1078\"\n            data-room-name=\"K\u00e4gelbanan, S\u00f6dra teatern\"\n            data-day=\"2\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;p&gt;&amp;nbsp;&lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Social 
event\"\n            data-category-ids=\"1067\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Student Reception<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>K\u00e4gelbanan, S\u00f6dra teatern<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  19:00-00:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#C9EE91; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5247\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Registration\"\n            data-abs-nbr=\"\"\n            data-ystart=\"7.75\"\n            data-yend=\"17\"\n            data-time=\"07:45-17:00\"\n            data-room=\"23\"\n            data-room-id=\"1112\"\n            data-room-name=\"S\u00f6dra Huset, House A\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"\"\n            data-category-ids=\"\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Registration<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  07:45-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4327\"\n            
data-project=\"project_242_2017_01_12\"\n            data-title=\"Keynote 2: Catherine Pelachaud, Conversing with social agents that smile and laugh\"\n            data-abs-nbr=\"\"\n            data-ystart=\"8.5\"\n            data-yend=\"9.5\"\n            data-time=\"08:30-09:30\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"3\"\n            data-abs-path=\"\/abs\/4327.html\"\n            data-speaker=\"Catherine Pelachaud\"\n            data-speakercell=\"Catherine Pelachaud\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Bj\u00f6rn Granstr\u00f6m&lt;br&gt;&lt;br&gt;\n                 The session will also be broadcasted (with two-way communication) to rooms A2 and C6.\n                &lt;br&gt;&lt;br&gt;\"\n            data-category=\"Keynote\"\n            data-category-ids=\"1057\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Keynote 2: Catherine Pelachaud, Conversing with social agents that smile and laugh<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Aula Magna<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  08:30-09:30                <\/span><\/div>\n\n              <div class=\"lecturer\"><span>Catherine Pelachaud<\/span><\/div>\n            <\/div>\n\n\n                                <div style=\"background-color:#72D9EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5261\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Refreshments\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9.5\"\n            data-yend=\"10\"\n            
data-time=\"09:30-10:00\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Refreshments<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  09:30-10:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4331\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Dialog and Prosody\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Julia Hirschberg; Rolf Carlson&lt;br&gt;&lt;br&gt;10.00-10.20 - Prosodic Event Recognition using Convolutional Neural Networks with Context Information&lt;br&gt;&lt;small&gt;Sabrina Stehwien; Ngoc Thang Vu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Prosodic Facilitation and Interference while Judging on the Veracity of Synthesized Statements&lt;br&gt;&lt;small&gt;Ramiro H. 
Galvez; \u0160tefan Be\u0148u\u0161; Agustin Gravano; Marian Trnka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - An investigation of pitch matching across adjacent turns in a corpus of spontaneous German&lt;br&gt;&lt;small&gt;Margaret Zellers; Antje Schweitzer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - The Relationship between F0 Synchrony and Speech Convergence in Dyadic Interaction&lt;br&gt;&lt;small&gt;Sankar Mukherjee; Alessandro D'Ausilio; No\u00ebl Nguyen; Luciano Fadiga; Leonardo Badino&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - The role of linguistic and prosodic cues on the prediction of self-reported satisfaction in contact centre phone calls&lt;br&gt;&lt;small&gt;Jordi Luque; Ariadna S\u00e1nchez; Carlos Segura; Mart\u00ed Umbert; Luis \u00c1ngel Galindo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Cross-linguistic study of the production of turn-taking cues in American English and Argentine Spanish&lt;br&gt;&lt;small&gt;Pablo Brusco; Agustin Gravano; Juan Manuel P\u00e9rez&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n            data-category-ids=\"1051\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Dialog and Prosody<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4332\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Social Signals, Styles, and Interaction\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n           
 data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Khiet Truong; Nigel Ward&lt;br&gt;&lt;br&gt;10.00-10.20 - Emotional Features for Speech Overlaps Classification&lt;br&gt;&lt;small&gt;Olga Egorow; Andreas Wendemuth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Computing Multimodal Dyadic Behaviors during Spontaneous Diagnosis Interviews toward Automatic Categorization of Autism Spectrum Disorder&lt;br&gt;&lt;small&gt;Chin-Po Chen; Xian-Hong Tseng; Susan Shur-Fen Gau; Chi-Chun Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Deriving Dyad-Level Interaction Representation using Interlocutors Structural and Expressive Multimodal Behavior Features&lt;br&gt;&lt;small&gt;Yun-Shao Lin; Chi-Chun Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Spotting Social Signals in Conversational Speech over IP: A Deep Learning Perspective&lt;br&gt;&lt;small&gt;Raymond Brueckner; Maximilian Schmitt; Maja Pantic; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Optimized Time Series Filters for Detecting Laughter and Filler Events&lt;br&gt;&lt;small&gt;G\u00e1bor Gosztolya&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Visual, Laughter, Applause and Spoken Expression Features for Predicting Engagement within TED Talks.&lt;br&gt;&lt;small&gt;Fasih Haider; Fahim A. 
Salim; Saturnino Luz; Carl Vogel; Owen Conlan; Nick Campbell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Social Signals, Styles, and Interaction<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4345\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speaker and Language Recognition Applications\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"17\"\n            data-room-id=\"1067\"\n            data-room-name=\"Poster 2\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mitchell McLaren&lt;br&gt;&lt;br&gt;10.00-12.00 - Acoustic Pairing of Original and Dubbed Voices in the Context of Video Game Localization&lt;br&gt;&lt;small&gt;Adrien Gresse; Mickael Rouvier; Richard Dufour; Vincent Labatut; Jean-Francois Bonastre&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - A Generative Model for Score Normalization in Speaker Recognition&lt;br&gt;&lt;small&gt;Albert Swart; Niko Brummer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Bidirectional Modelling for Short Duration Language Identification&lt;br&gt;&lt;small&gt;Sarith Fernando; Vidhyasaharan Sethu; Eliathamby Ambikairajah; Julien 
Epps&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Calibration Approaches for Language Detection&lt;br&gt;&lt;small&gt;Mitchell McLaren; Luciana Ferrer; Diego Castan; Aaron Lawson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Conditional Generative Adversarial Nets Classifier for Spoken Language Identification&lt;br&gt;&lt;small&gt;Peng Shen; Xugang Lu; Sheng Li; Hisashi Kawai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Domain Adaptation of PLDA models in Broadcast Diarization by means of Unsupervised Speaker Clustering&lt;br&gt;&lt;small&gt;Ignacio Vi\u00f1als; Alfonso Ortega; Jesus Villalba; Antonio Miguel; Eduardo Lleida Solano&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Homogeneity Measure Impact on Target and Non-target Trials in Forensic Voice Comparison&lt;br&gt;&lt;small&gt;Moez Ajili; Jean-Francois Bonastre; Waad Ben Kheder; Solange Rossato; Juliette Kahn&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - IITG-Indigo System for NIST 2016 SRE Challenge&lt;br&gt;&lt;small&gt;Nagendra Kumar; Rohan Kumar Das; Sarfaraz Jelil; Dhanush B K; Harish Kashyap; Sri Rama Murty Kodukula; Sriram Ganapathy; Rohit Sinha; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Locally Weighted Linear Discriminant Analysis for Robust Speaker Verification&lt;br&gt;&lt;small&gt;Abhinav Misra; Shivesh Ranjan; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - LSTM Neural Network-based Speaker Segmentation using Acoustic and Language Modelling&lt;br&gt;&lt;small&gt;Miquel Angel India Massana; Jos\u00e9 A. R. Fonollosa; Javier Hernando&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Null-Hypothesis LLR: A proposal for Forensic Automatic Speaker Recognition&lt;br&gt;&lt;small&gt;Yosef A. 
Solewicz; Michael Jessen; David van der Vloed&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Recursive Whitening Transformation for Speaker Recognition on Language Mismatched Condition&lt;br&gt;&lt;small&gt;Suwon Shon; Seongkyu Mun; Hanseok Ko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Speaker Clustering by Iteratively Finding Discriminative Feature Space and Cluster Labels&lt;br&gt;&lt;small&gt;Sungrack Yun; Hye Jin Jang; Taesu Kim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - The Opensesame NIST 2016 Speaker Recognition Evaluation System&lt;br&gt;&lt;small&gt;Gang Liu; Qi Qian; Zhibin Wang; Qingen Zhao; Tianzhou Wang; Hao Li; Jian Xue; Shenghuo Zhu; Rong Jin; Tuo Zhao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Tied Hidden Factors in Neural Networks for End-to-End Speaker Recognition&lt;br&gt;&lt;small&gt;Antonio Miguel; Jorge Llombart; Alfonso Ortega; Eduardo Lleida Solano&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speaker and Language Recognition Applications<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4359\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Data Collection, Transcription and Annotation Issues in Child Language Acquisition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"14\"\n            
data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Elika Bergelson; Sho Tsuji&lt;br&gt;&lt;br&gt;10.00-10.20 - Top-down versus bottom-up theories of phonological acquisition: A big data approach&lt;br&gt;&lt;small&gt;Christina Bergmann; Sho Tsuji; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - What do babies hear? Analyses of child- and adult-directed speech&lt;br&gt;&lt;small&gt;Marisa  Casillas; Andrei Amatuni; Amanda Seidl; Melanie Soderstrom; Anne Warlaumont; Elika Bergelson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - The LENA system applied to Swedish: Reliability of the Adult Word Count estimate&lt;br&gt;&lt;small&gt;Iris-Corinna Schwarz; Noor Botros; Alekzandra Lord; Amelie Marcusson; Henrik Tidelius; Ellen Marklund&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Which acoustic and phonological factors shape infants' vowel discrimination? Exploiting natural variation in InPhonDB&lt;br&gt;&lt;small&gt;Sho Tsuji; Alejandrina Cristia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - A New Workflow for Semi-automatized Annotations: Tests with Long-Form Naturalistic Recordings of Children\u2019s Language Environments&lt;br&gt;&lt;small&gt;Marisa  Casillas; Elika Bergelson; Anne S. 
Warlaumont; Alejandrina Cristia; Melanie Soderstrom; Mark VanDam; Han Sloetjes&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - SLPAnnotator: Tools for implementing Sign Language Phonetic Annotation&lt;br&gt;&lt;small&gt;Kathleen Currie Hall; Scott Mackie; Michael Fry; Oksana Tkachman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n            data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Data Collection, Transcription and Annotation Issues in Child Language Acquisition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4346\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Spoken Document Processing\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"18\"\n            data-room-id=\"1069\"\n            data-room-name=\"Poster 3\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Tanja Schultz&lt;br&gt;&lt;br&gt;10.00-12.00 - A relevance score estimation for spoken term detection based on RNN-generated pronunciation embeddings&lt;br&gt;&lt;small&gt;Jan \u0160vec; Josef V. 
Psutka; Lubo\u0161 \u0160m\u00eddl; Jan Trmal&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Automatic Alignment between Classroom Lecture Utterances and Slide Components&lt;br&gt;&lt;small&gt;Masatoshi Tsuchiya; Ryo Minamiguchi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Compensating Gender Variability in Query-by-Example Search on Speech Using Voice Conversion&lt;br&gt;&lt;small&gt;Paula Lopez-Otero; Laura Docio-Fernandez; Carmen Garcia-Mateo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Constructing Acoustic Distances between Subwords and States Obtained from a Deep Neural Network for Spoken Term Detection&lt;br&gt;&lt;small&gt;Daisuke Kaneko; Kazunori Kojima; Kazuyo Tanaka; Shi-wook Lee; Yoshiaki Itoh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Evaluating automatic topic segmentation as a segment retrieval task&lt;br&gt;&lt;small&gt;Abdessalam Bouchekif; Delphine Charlet; Geraldine Damnati; Nathalie Camelin; Yannick Est\u00e8ve&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Exploring the Use of Significant Words Language Modeling for Spoken Document Retrieval&lt;br&gt;&lt;small&gt;Ying-Wen Chen; Kuan-Yu Chen; Hsin-Min Wang; Berlin Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Fast and Accurate OOV Decoder on High-Level Features&lt;br&gt;&lt;small&gt;Yuri Khokhlov; Natalia Tomashenko; Ivan Medennikov; Aleksei Romanenko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Hierarchical Recurrent Neural Network for Story Segmentation&lt;br&gt;&lt;small&gt;Emiru Tsunoo; Peter Bell; Steve Renals&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Improving Speech Recognizers by Refining Broadcast Data with Inaccurate Subtitle Timestamps&lt;br&gt;&lt;small&gt;Jeong-Uk Bang; Mu-Yeol Choi; Sang-Hun Kim; Oh-Wook Kwon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Incorporating Acoustic Features for Spontaneous Speech driven Content Retrieval&lt;br&gt;&lt;small&gt;Hiroto Tasaki; Tomoyosi Akiba&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Order-Preserving 
Abstractive Summarization for Spoken Content Based on Connectionist Temporal Classification&lt;br&gt;&lt;small&gt;Bo Ru Lu; Frank Shyu; Yun-Nung Chen; Hung-yi Lee; Lin-shan Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Query-by-Example Search with Discriminative Neural Acoustic Word Embeddings&lt;br&gt;&lt;small&gt;Shane Settle; Keith Levin; Herman Kamper; Karen Livescu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Zero-Shot Learning across Heterogenous Overlapping Domains&lt;br&gt;&lt;small&gt;Anjishnu Kumar; Pavankumar Muddireddy; Markus Dreyer; Bjorn Hoffmeister&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n            data-category-ids=\"1053\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Spoken Document Processing<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4360\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Digital Revolution for Under-resourced Languages 1\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Alexey Karpov; Kristiina Jokinen&lt;br&gt;&lt;br&gt;10.00-10.20 - Team ELISA 
System for DARPA LORELEI Speech Evaluation 2016&lt;br&gt;&lt;small&gt;Pavlos Papadopoulos; Ruchir Travadi; Colin Vaz; Nikolaos Malandrakis; Ulf Hermjakob; Nima Pourdamghani; Michael Pust; Boliang Zhang; Xiaoman Pan; Di Lu; Ying Lin; Ondrej Glembek; Murali Karthick B; Martin Karafiat; Lukas Burget; Mark Hasegawa-Johnson; Heng Ji; Jonathan May; Kevin Knight; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - First Results in Developing a Medieval Latin Language Charter Dictation System for the East-Central Europe Region&lt;br&gt;&lt;small&gt;Peter Mihajlik; Lili Szabo; Balazs Tarjan; Andras Balog; Krisztina Rabai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - The motivation and development of MPAi, a M\u0101ori Pronunication Aid.&lt;br&gt;&lt;small&gt;Catherine Watson; Peter Keegan; Margaret Maclagan; Ray Harlow; Jeanette King&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - On the Linguistic Relevance of Speech Units Learned by Unsupervised Acoustic Modeling&lt;br&gt;&lt;small&gt;Siyuan Feng; Tan Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Deep Autoencoder Based Multi-task Learning Using Probabilistic Transcriptions&lt;br&gt;&lt;small&gt;Amit Das; Mark Hasegawa-Johnson; Karel Vesely&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Areal and Phylogenetic Features for Multilingual Speech Synthesis&lt;br&gt;&lt;small&gt;Alexander Gutkin; Richard Sproat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n            data-category-ids=\"1053\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Digital Revolution for Under-resourced Languages 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                
<span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4344\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Recognition: Technologies for New Applicaitions and Paradigms\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"16\"\n            data-room-id=\"1061\"\n            data-room-name=\"Poster 1\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kris Demuynck&lt;br&gt;&lt;br&gt;10.00-12.00 - A Mostly Data-driven Approach to Inverse Text Normalization&lt;br&gt;&lt;small&gt;Ernest Pusateri; Bharat Ambati; Elizabeth Brooks; Ondrej Platek; Donald McAllaster; Venki Nagesha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Automatic Evaluation of Children Reading Aloud on Sentences and Pseudowords&lt;br&gt;&lt;small&gt;Jorge Proen\u00e7a; Carla Lopes; Michael Tjalve; Andreas Stolcke; Sara Candeias; Fernando Perdig\u00e3o&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Automatic Explanation Spot Estimation Method Targeted at Text and Figures in Lecture Slides&lt;br&gt;&lt;small&gt;Shoko Tsujimura; Kazumasa Yamamoto; Seiichi Nakagawa&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Comparison of Non-parametric Bayesian Mixture Models for Syllable Clustering and Zero-Resource Speech Processing&lt;br&gt;&lt;small&gt;Shreyas Seshadri; Ulpu Remes; Okko R\u00e4s\u00e4nen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Developing On-Line Speaker Diarization System&lt;br&gt;&lt;small&gt;Dimitrios Dimitriadis; Petr Fousek&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Distilling Knowledge from an Ensemble of Models for Punctuation 
Prediction&lt;br&gt;&lt;small&gt;Jiangyan Yi; Jianhua Tao; Zhengqi Wen; Ya Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Experiments in Character-level Neural Network Models for Punctuation&lt;br&gt;&lt;small&gt;William Gale; Sarangarajan Parthasarathy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Improving Mispronunciation Detection for Non-Native Learners with Multisource Information and LSTM-Based Deep Models&lt;br&gt;&lt;small&gt;Wei Li; Nancy F Chen; Sabato Marco Siniscalchi; Chin-Hui Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Mismatched Crowdsourcing From Multiple Annotator Languages For Recognizing Zero-resourced Languages: A Nullspace Clustering Approach&lt;br&gt;&lt;small&gt;Wenda Chen; Mark Hasegawa-Johnson; Nancy Chen; Boon Pang Lim&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Multi-Channel Apollo Mission Speech Transcript Calibration&lt;br&gt;&lt;small&gt;Lakshmish Kaushik; Abhijeet Sangwan; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Multiview Representation Learning via Deep CCA for Silent Speech Recognition&lt;br&gt;&lt;small&gt;Myungjong Kim; Beiming Cao; Ted Mau; Jun Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Off-topic Spoken Response Detection with Word Embeddings&lt;br&gt;&lt;small&gt;Su-Youn Yoon; Chong Min Lee; Ikkyu Choi; Xinhao Wang; Matthew Mulholland; Keelan Evanini&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Use of Graphemic Lexicons for Spoken Language Assessment&lt;br&gt;&lt;small&gt;Kate Knill; Mark Gales; Kostas Kyriakopoulos; Anton Ragni; Yu Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Technologies and Systems for New Applications\"\n            data-category-ids=\"1066\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Recognition: Technologies for New Applicaitions and Paradigms<\/span>\n              
<\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4347\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Intelligibility\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"19\"\n            data-room-id=\"1068\"\n            data-room-name=\"Poster 4\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Prasanta Ghosh&lt;br&gt;&lt;br&gt;10.00-12.00 - Intelligibilities of Mandarin Chinese Sentences with Spectral \u201cHoles\u201d&lt;br&gt;&lt;small&gt;Yafan Chen; Yong Xu; Jun Yang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Listening in the dips: Comparing relevant features for speech recognition in humans and machines&lt;br&gt;&lt;small&gt;Constantin Spille; Bernd T. 
Meyer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - On the use of Band Importance Weighting in the Short-Time Objective Intelligibility Measure&lt;br&gt;&lt;small&gt;Asger Heidemann Andersen; Jan Mark de Haan; Zheng-Hua Tan; Jesper Jensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Predicting Automatic Speech Recognition Performance over Communication Channels from Instrumental Speech Quality and Intelligibility Scores&lt;br&gt;&lt;small&gt;Laura Fern\u00e1ndez Gallardo; Sebastian M\u00f6ller; John Beerends&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Predicting Speech Intelligibility Using a Gammachirp Envelope Distortion Index Based on the Signal-to-Distortion Ratio&lt;br&gt;&lt;small&gt;Katsuhiko Yamamoto; Toshio Irino; Toshie Matsui; Shoko Araki; Keisuke Kinoshita; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Speech intelligibility in cars: the effect of speaking style, noise and listener age&lt;br&gt;&lt;small&gt;Cassia Valentini-Botinhao; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - The effect of situation-specific non-speech acoustic cues on the intelligibility of speech in noise&lt;br&gt;&lt;small&gt;Lauren Ward; Ben Shirley; Yan Tang; William Davies&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Coding and Enhancement\"\n            data-category-ids=\"1060\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Intelligibility<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4328\"\n            data-project=\"project_242_2017_01_12\"\n            
data-title=\"Speech Production and Physiology\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Felicitas Kleber; Elizabeth Godoy&lt;br&gt;&lt;br&gt;10.00-10.20 - Aerodynamic features of French fricatives&lt;br&gt;&lt;small&gt;Rosario Signorello; Sergio Hassid; Didier Demolin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Inter-speaker variability: speaker normalisation and quantitative estimation of articulatory invariants in speech production for French&lt;br&gt;&lt;small&gt;Antoine Serrurier; Pierre Badin; Louis-Jean Boe; Laurent Lamalle; Christiane Neuschaefer-Rube&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Comparison of Basic Beatboxing Articulations between Expert and Novice Artists using Real-Time Magnetic Resonance Imaging&lt;br&gt;&lt;small&gt;Nimisha Patil; Timothy Greer; Reed Blaylock; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Speaker-specific Biomechanical Model-based Investigation of a Simple Speech Task based on Tagged-MRI&lt;br&gt;&lt;small&gt;Keyi Tang; Negar Mohaghegh Harandi; Jonghye Woo; Georges El Fakhri; Maureen Stone; Sidney Fels&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Sounds of the Human Vocal Tract&lt;br&gt;&lt;small&gt;Reed Blaylock; Nimisha Patil; Timothy Greer; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - A simulation study on the effect of glottal boundary conditions on vocal tract formants&lt;br&gt;&lt;small&gt;Yasufumi Uezu; Tokihiko Kaburagi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n            data-category-ids=\"1055\"\n            
data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Production and Physiology<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4355\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 5\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"11\"\n            data-room-id=\"1070\"\n            data-room-name=\"E306\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"10.00-12.00 - A Thematicity-based Prosody Enrichment Tool for CTS&lt;br&gt;&lt;small&gt;Monica Dominguez; Mireia Farr\u00fas; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Creating a Voice for MiRo, the World\u2019s First Commercial Biomimetic Robot&lt;br&gt;&lt;small&gt;Roger Moore; Ben Mitchinson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - SIAK - A Game for Foreign Language Pronunciation Learning&lt;br&gt;&lt;small&gt;Reima Karhila; Sari Ylinen; Seppo Enarvi; Kalle Palom\u00e4ki; Aleksander Nikulin; Olli Rantula; Vertti Viitanen; Krupakar Dhinakaran; Anna-Riikka Smolander; Heini Kallio; Maria Uther; Katja Junttila; Perttu H\u00e4m\u00e4l\u00e4inen; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - TBT(Toolkit to Build TTS): A High Performance Framework to build Multiple Language HTS Voice&lt;br&gt;&lt;small&gt;Atish Ghone; Rachana Nerpagar; Pranaw Kumar; Arun Baby; Aswin Shanmugam; Sasikumar Mukundan; Hema 
Murthy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - WebSubDub - Experimental system for creating high-quality alternative audio track for TV broadcasting&lt;br&gt;&lt;small&gt;Martin Gr\u016fber; Jindrich Matousek; Zden\u011bk Hanzl\u00ed\u010dek; Jakub V\u00edt; Daniel Tihelka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Voice Conservation and TTS System for People Facing Total Laryngectomy&lt;br&gt;&lt;small&gt;Mark\u00e9ta J\u016fzov\u00e1; Daniel Tihelka; Jindrich Matousek; Zdenek Hanzlicek&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 5<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4329\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Acoustic Model Adaptation\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Catherine Breslin; George Saon&lt;br&gt;&lt;br&gt;10.00-10.20 - Large-Scale Domain Adaptation via 
Teacher-Student Learning&lt;br&gt;&lt;small&gt;Jinyu Li; Michael Seltzer; Xi Wang; Rui Zhao; Yifan Gong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Improving Children's Speech Recognition through Explicit Pitch Scaling based on Iterative Spectrogram Inversion&lt;br&gt;&lt;small&gt;Waquar Ahmad; Syed Shahnawazuddin; Hemant Kumar Kathania; Gayadhar Pradhan; A. B. Samaddar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - RNN-LDA Clustering for Feature Based DNN Adaptation&lt;br&gt;&lt;small&gt;Xurong Xie; Xunying Liu; Tan Lee; Lan Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Robust online i-vectors for unsupervised adaptation of DNN acoustic models: A study in the context of digital voice assistants&lt;br&gt;&lt;small&gt;Harish Arsikere; Sri Garimella&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Semi-supervised Learning with Semantic Knowledge Extraction for Improved Speech Recognition in Air Traffic Control&lt;br&gt;&lt;small&gt;Ajay Srinivasamurthy; Petr Motlicek; Ivan Himawan; Gyorgy Szaszak; Youssef Oualil; Hartmut Helmke&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Dynamic Layer Normalization for Adaptive Neural Acoustic Modeling in Speech Recognition&lt;br&gt;&lt;small&gt;Taesup Kim; Inchul Song; Yoshua Bengio&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Acoustic Model Adaptation<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            
data-id=\"4330\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech and Harmonic Analysis\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Abeer Alwan; Franz Pernkopf&lt;br&gt;&lt;br&gt;10.00-10.20 - A robust and alternative approach to zero frequency filtering method for epoch extraction&lt;br&gt;&lt;small&gt;Gangamohan Paidi; Bayya Yegnanarayana&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Improving YANGsaf F0 Estimator with Adaptive Kalman Filter&lt;br&gt;&lt;small&gt;Kanru Hua&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - A Spectro-Temporal Demodulation Technique for Pitch Estimation&lt;br&gt;&lt;small&gt;Jitendra Dhiman; Nagaraj Adiga; Chandra Sekhar Seelamantula&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Robust method for estimating F0 of complex tone based on pitch perception of amplitude modulated signal&lt;br&gt;&lt;small&gt;Kenichiro Miwa; Masashi Unoki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Low-Complexity Pitch Estimation Based on Phase Differences Between Low-Resolution Spectra&lt;br&gt;&lt;small&gt;Simon Graf; Tobias Herbig; Markus Buck; Gerhard Schmidt&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Harvest: A high-performance fundamental frequency estimator from speech signals&lt;br&gt;&lt;small&gt;Masanori Morise&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Speech and Audio Signals\"\n            data-category-ids=\"1062\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech and 
Harmonic Analysis<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4356\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 6\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"12\"\n            data-room-id=\"1071\"\n            data-room-name=\"E397\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"10.00-12.00 - A Robust Medical Speech-to-Speech\/Speech-to-Sign Phraselator&lt;br&gt;&lt;small&gt;Farhia Ahmed; Pierrette Bouillon; Chelle Destefano; Johanna Gerlach; Sonia Halimi; Angela Hooper; Manny Rayner; Herv\u00e9 Spechbach; Irene Strasly; Nikos Tsourakis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Integrating the Talkamatic Dialogue Manager with Alexa&lt;br&gt;&lt;small&gt;Staffan Larsson; Fredrik Kronlid; Andreas Krona; Alex Berman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Modifying Amazon\u2019s Alexa ASR Grammar and Lexicon \u2013 A Case Study&lt;br&gt;&lt;small&gt;Aman Kumar; Hassan Alam; Manan Vyas; Tina Werner; Rachmat Hartono&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Nora the Empathetic Psychologist&lt;br&gt;&lt;small&gt;Genta Indra Winata; Onno Kampman; Yang Yang; Anik Dey; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Towards an Autarkic Embedded Cognitive User Interface&lt;br&gt;&lt;small&gt;Frank Duckhorn; Markus Huber; Werner Meyer; Oliver Jokisch; Constanze Tsch\u00f6pe; Matthias Wolff&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & 
Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 6<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5253\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Lunch\"\n            data-abs-nbr=\"\"\n            data-ystart=\"12\"\n            data-yend=\"13.5\"\n            data-time=\"12:00-13:30\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Lunch<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  12:00-13:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4348\"\n            data-project=\"project_242_2017_01_12\"\n           
 data-title=\"Articulatory and Acoustic Phonetics\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"17\"\n            data-room-id=\"1067\"\n            data-room-name=\"Poster 2\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mattias Heldner&lt;br&gt;&lt;br&gt;13.30-15.30 - Acoustic cues to the singleton-geminate contrast: the case of Libyan Arabic sonorants&lt;br&gt;&lt;small&gt;Amel Issa&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An ultrasound study of alveolar and retroflex consonants in Arrernte: stressed and unstressed syllables&lt;br&gt;&lt;small&gt;Marija Tabain; Richard Beare&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Preliminary Phonetic Investigation of Alphabetic Words in Mandarin Chinese&lt;br&gt;&lt;small&gt;Hongwei Ding; Yuanyuan Zhang; Hongchao Liu; Chu-Ren Huang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Quantitative Measure of the Impact of Coarticulation on Phone Discriminability&lt;br&gt;&lt;small&gt;Thomas Schatz; Rory Turnbull; Francis Bach; Emmanuel Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Effect of formant and F0 discontinuity on perceived vowel duration: Impacts for concatenative speech synthesis&lt;br&gt;&lt;small&gt;Tom\u00e1\u0161 Bo\u0159il; Pavel \u0160turm; Radek Skarnitzl; Jan Vol\u00edn&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Kinematic signatures of prosody in Lombard speech&lt;br&gt;&lt;small&gt;\u0160tefan Be\u0148u\u0161; Juraj \u0160imko; Mona Lehtinen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Locating burst onsets using SFF envelope and phase information&lt;br&gt;&lt;small&gt;Bhanu Teja Nellore; RaviShankar Prasad; Sudarsana Reddy Kadiri; Suryakanth V Gangashetty; Bayya Yegnanarayana&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - 
Mel-cepstral distortion of German vowels in different information density contexts&lt;br&gt;&lt;small&gt;Erika Brandt; Frank Zimmerer; Bistra Andreeva; Bernd M\u00f6bius&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Mental Representation of Japanese Mora: focusing on intrinsic duration&lt;br&gt;&lt;small&gt;Kosuke Sugai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Reshaping the transformed LF model: generating the glottal source from the waveshape parameter Rd&lt;br&gt;&lt;small&gt;Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Temporal Dynamics of Lateral Channel Formation in \/l\/: 3D EMA Data from Australian English&lt;br&gt;&lt;small&gt;Jia Ying; Christopher Carignan; Jason Shaw; Michael Proctor; Donald Derrick; Catherine Best&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - What do Finnish and Central Bavarian have in common? Towards an acoustically based quantity typology&lt;br&gt;&lt;small&gt;Markus Jochim; Felicitas Kleber&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Vowel and Consonant Sequences in three Bavarian varieties in Austria&lt;br&gt;&lt;small&gt;Nicola Klingler; Sylvia Moosm\u00fcller; Hannes Scheutz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Articulatory and Acoustic Phonetics<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4361\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Digital Revolution for 
Under-resourced Languages 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"16\"\n            data-room-id=\"1061\"\n            data-room-name=\"Poster 1\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Shyam Agrawal; Oddur Kjartansson&lt;br&gt;&lt;br&gt;13.30-15.30 - Building an ASR corpus using Althingi's Parliamentary Speeches&lt;br&gt;&lt;small&gt;Inga R\u00fan Helgad\u00f3ttir; R\u00f3bert Kjaran; Anna Bj\u00f6rk Nikul\u00e1sd\u00f3ttir; Jon Gudnason&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Building ASR corpora using Eyra&lt;br&gt;&lt;small&gt;Jon Gudnason; Matth\u00edas P\u00e9tursson; R\u00f3bert Kjaran; Simon Kluepfel; Anna Nikul\u00e1sd\u00f3ttir&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Eliciting meaningful units from speech&lt;br&gt;&lt;small&gt;Daniil Kocharov; Tatiana Kachkovskaia; Pavel Skrelin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Extracting Situation Frames from non-English Speech: Evaluation Framework and Pilot Results&lt;br&gt;&lt;small&gt;Nikolaos Malandrakis; Ondrej Glembek; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Implementation of a Radiology Speech Recognition System for Estonian using Open Source Software&lt;br&gt;&lt;small&gt;Tanel Alum\u00e4e; Andrus Paats; Ivo Fridolin; Einar Meister&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Improving DNN Bluetooth Narrowband Acoustic Models by Cross-bandwidth and Cross-lingual Initialization&lt;br&gt;&lt;small&gt;Xiaodan Zhuang; Arnab Ghoshal; Antti-Veikko Rosti; Matthias Paulik; Daben Liu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Joint Estimation of Articulatory Features and Acoustic models for Low-Resource Languages&lt;br&gt;&lt;small&gt;Basil Abraham; Srinivasan Umesh; Neethu Mariam 
Joy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Leveraging Text Data for Word Segmentation for Underresourced Languages&lt;br&gt;&lt;small&gt;Thomas Glarner; Benedikt Boenninghoff; Oliver Walter; Reinhold Haeb-Umbach&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Machine Assisted Analysis of Vowel Length Contrasts in Wolof&lt;br&gt;&lt;small&gt;Elodie Gauthier; Laurent Besacier; Sylvie Voisin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Nativization of foreign names in TTS for automatic reading of world news in Swahili&lt;br&gt;&lt;small&gt;Joseph Mendelson; Pilar Oplustil; Oliver Watts; Simon King&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 1&lt;br&gt;&lt;small&gt;Claudia Soria&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 2&lt;br&gt;&lt;small&gt;Alexey Karpov&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 3&lt;br&gt;&lt;small&gt;Emmanuel Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 4&lt;br&gt;&lt;small&gt;Mary Harper&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 5&lt;br&gt;&lt;small&gt;Sebastian Stueker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 6&lt;br&gt;&lt;small&gt;Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Panelist poster 7&lt;br&gt;&lt;small&gt;Linne Ha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Rapid development of TTS corpora for four South African languages&lt;br&gt;&lt;small&gt;Daniel Van Niekerk; Charl Van Heerden; Marelie Davel; Neil Kleynhans; Oddur Kjartansson; Martin Jansche; Linne Ha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - The ABAIR initiative: Bringing Spoken Irish into the Digital Space&lt;br&gt;&lt;small&gt;Ailbhe N\u00ed Chasaide; Neasa N\u00ed Chiar\u00e1in; Christoph Wendler; Harald Berthelsen; Andy Murphy; Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Transfer Learning and Distillation Techniques to Improve the Acoustic Modeling of Low Resource 
Languages&lt;br&gt;&lt;small&gt;Basil Abraham; Tejaswi Seeram; Srinivasan Umesh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Uniform Multilingual Multi-Speaker Acoustic Model for Statistical Parametric Speech Synthesis of Low-Resourced Languages&lt;br&gt;&lt;small&gt;Alexander Gutkin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Unsupervised Speech Signal to Symbol Transformation for Zero Resource Speech Applications&lt;br&gt;&lt;small&gt;Saurabhchand Bhati; Shekhar Nayak; Sri Rama Murty Kodukula&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Very low resource radio browsing for agile developmental and humanitarian monitoring&lt;br&gt;&lt;small&gt;Armin Saeb; Raghav Menon; Hugh Cameron; William Kibira; John Quinn; Thomas Niesler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n            data-category-ids=\"1053\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Digital Revolution for Under-resourced Languages 2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4350\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Disorders Related to Speech and Language\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"19\"\n            data-room-id=\"1068\"\n            data-room-name=\"Poster 4\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n        
    data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Jan Rusz&lt;br&gt;&lt;br&gt;13.30-15.30 - Acoustic evaluation of nasality in cerebellar syndromes&lt;br&gt;&lt;small&gt;Michal Novotn\u00fd; Jan Rusz; Karel Sp\u00e1lenka; Ji\u0159\u00ed Klemp\u00ed\u0159; Dana Hor\u00e1kov\u00e1; Ev\u017een R\u016f\u017ei\u010dka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An Affect Prediction Approach through Depression Severity Parameter Incorporation in Neural Networks&lt;br&gt;&lt;small&gt;Rahul Gupta; Saurabh Sahu; Carol Espy-Wilson; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An avatar-based system for identifying individuals likely to develop dementia&lt;br&gt;&lt;small&gt;Bahman Mirheidari; Daniel Blackburn; Kirsty Harkness; Traci Walker; Annalena Venneri; Markus Reuber; Heidi Christensen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An N-Gram Based Approach to the Automatic Diagnosis of Alzheimer's Disease from Spoken Language&lt;br&gt;&lt;small&gt;Sebastian Wankerl; Elmar Noeth; Stefan Evert&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Cross-Database Models for the Classification of Dysarthria Presence&lt;br&gt;&lt;small&gt;Stephanie Gillespie; Yash-Yee Logan; Elliot Moore; Jacqueline Laures-Gore; Scott Russell; Rupal Patel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Cross-Domain Classification of Drowsiness in Speech: The Case of Alcohol Intoxication and Sleep Deprivation&lt;br&gt;&lt;small&gt;Yue Zhang; Felix Weninger; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Depression Detection Using Automatic Transcriptions of De-Identified Speech&lt;br&gt;&lt;small&gt;Paula Lopez-Otero; Laura Docio-Fernandez; Alberto Abad; Carmen Garcia-Mateo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Emotional Speech of Mentally and Physically Disabled Individuals: Introducing The EmotAsS Database and First Findings&lt;br&gt;&lt;small&gt;Simone Hantke; Hesam Sagha; 
Nicholas Cummins; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Exploiting Intra-annotator Rating Consistency through Copeland's Method for Estimation of Ground Truth Labels in Couples' Therapy&lt;br&gt;&lt;small&gt;Karel Mundnich; Md Nasir; Panayiotis Georgiou; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Float Like a Butterfly Sting Like a Bee: Changes in Speech Preceded Parkinsonism Diagnosis for Muhammad Ali&lt;br&gt;&lt;small&gt;Visar Berisha; Julie Liss; Timothy Huston; Alan Wisler; Yishan Jiao; Jonathan Eig&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Manual and Automatic Transcriptions in Dementia Detection from Speech&lt;br&gt;&lt;small&gt;Jochen Weiner; Mathis Engelbart; Tanja Schultz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Phonological markers of Oxytocin and MDMA ingestion&lt;br&gt;&lt;small&gt;Carla Agurto; Raquel Norel; Rachel Ostrand; Gillinder Bedi; Harriet de Wit; Matthew J. Baggott; Matthew G. Kirkpatrick; Margaret Wardle; Guillermo Cecchi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Rhythmic Characteristics of Parkinsonian Speech: A Study on Mandarin and Polish&lt;br&gt;&lt;small&gt;Massimo Pettorino; Wentao Gu; Pawe\u0142 P\u00f3\u0142rola; Ping Fan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Disorders Related to Speech and Language<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4335\"\n          
  data-project=\"project_242_2017_01_12\"\n            data-title=\"Noise Robust Speech Recognition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Yifan Gong; Izhak Shafran&lt;br&gt;&lt;br&gt;13.30-13.50 - Speech Representation Learning Using Unsupervised Data-Driven Modulation Filtering for Robust ASR&lt;br&gt;&lt;small&gt;Purvi Agrawal; Sriram Ganapathy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Combined Multi-channel NMF-based Robust Beamforming for Noisy Speech Recognition&lt;br&gt;&lt;small&gt;Masato Mimura; Yoshiaki Bando; Kazuki Shimada; Shinsuke Sakai; Kazuyoshi Yoshii; Tatsuya Kawahara&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Recognizing Multi-talker Speech with Permutation Invariant Training&lt;br&gt;&lt;small&gt;Dong Yu; Xuankai Chang; Yanmin Qian&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Coupled initialization of multi-channel non-negative matrix factorization based on spatial and spectral information&lt;br&gt;&lt;small&gt;Yuuki Tachioka; Tomohiro Narita; Iori Miura; Takanobu Uramoto; Natsuki Monta; Shingo Uenohara; Ken'ichi Furuya; Shinji Watanabe; Jonathan Le Roux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Channel Compensation in the Generalised Vector Taylor Series Approach to Robust ASR&lt;br&gt;&lt;small&gt;Erfan Loweimi; Jon Barker; Thomas Hain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Robust Speech Recognition Via Anchor Word Representations&lt;br&gt;&lt;small&gt;Brian King; I-Fan Chen; Yonatan Vaizman; Yuzong Liu; Roland Maas; SHK (Hari) Parthasarathi; Bjorn Hoffmeister&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: 
Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Noise Robust Speech Recognition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4349\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Music and Audio Processing\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"18\"\n            data-room-id=\"1069\"\n            data-room-name=\"Poster 3\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Unto Laine; Rohit Sinha&lt;br&gt;&lt;br&gt;13.30-15.30 - Acoustic Scene Classification using a CNN-SuperVector system trained with Auditory and Spectrogram Image Features&lt;br&gt;&lt;small&gt;Rakib Hyder; Shabnam Ghaffarzadegan; Zhe Feng; John H.L. 
Hansen; Taufiq Hasan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Domain Knowledge-Assisted Nonlinear Model for Head-Related Transfer Functions Based on Bottleneck Deep Neural Network&lt;br&gt;&lt;small&gt;Xiaoke Qi; Jianhua Tao&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - An audio based piano performance evaluation method using deep neural network based acoustic modeling&lt;br&gt;&lt;small&gt;Jing Pan; Ming Li; Zhanmei Song; Xin Li; Xiaolin Liu; Hua Yi; Manman Zhu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - AN ENVIRONMENTAL FEATURE REPRESENTATION FOR ROBUST SPEECH RECOGNITION AND FOR ENVIRONMENT IDENTIFICATION&lt;br&gt;&lt;small&gt;Xue Feng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Note Based Query By Humming System using Convolutional Neural Network&lt;br&gt;&lt;small&gt;Naziba Mostafa; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - A Transfer Learning Based Feature Extractor for Polyphonic Sound Event Detection Using Connectionist Temporal Classification&lt;br&gt;&lt;small&gt;Yun Wang; Florian Metze&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Attention and Localization based on a Deep Convolutional Recurrent Model for Weakly Supervised Audio Tagging&lt;br&gt;&lt;small&gt;Yong Xu; Qiuqiang Kong; Qiang Huang; Wenwu Wang; Mark D. 
Plumbley&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Audio Scene Classification with Deep Recurrent Neural Networks&lt;br&gt;&lt;small&gt;Huy Phan; Philipp Koch; Fabrice Katzberg; Marco Maass; Radoslaw Mazur; Alfred Mertins&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Automatic time-frequency analysis of echolocation signals using the matched Gaussian multitaper spectrogram&lt;br&gt;&lt;small&gt;Maria Sandsten; Isabella Reinhold; Josefin Starkhammar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Classification-Based Detection of Glottal Closure Instants from Speech Signals&lt;br&gt;&lt;small&gt;Jindrich Matousek; Daniel Tihelka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Laryngeal Articulation during Trumpet Performance: An Exploratory Study&lt;br&gt;&lt;small&gt;Luis M.T. Jesus; Bruno Rocha; Andreia Hall&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Matrix of Polynomials Model based Polynomial Dictionary Learning Method for Acoustic Impulse Response Modeling&lt;br&gt;&lt;small&gt;Jian Guan; Xuan Wang; Pengming Feng; Jing Dong; Wenwu Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Music Tempo Estimation Using Sub-band Synchrony&lt;br&gt;&lt;small&gt;Shreyan Chowdhury; Tanaya Guha; Rajesh Hegde&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Novel Shifted Real Spectrum for Exact Signal Reconstruction&lt;br&gt;&lt;small&gt;Meet Soni; Rishabh Tak; Hemant Patil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Sinusoidal Partials Tracking for Singing Analysis Using the Heuristic of the Minimal Frequency and Magnitude Difference&lt;br&gt;&lt;small&gt;Kin Wah Edward Lin; Hans Anderson; Clifford So; Simon Lui&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Unsupervised Filterbank Learning Using Convolutional Restricted Boltzmann Machine for Environmental Sound Classification&lt;br&gt;&lt;small&gt;Hardik Sailor; Dharmesh Agrawal; Hemant Patil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Speech and Audio Signals\"\n          
  data-category-ids=\"1062\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Music and Audio Processing<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4334\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Language Recognition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Yao Qian; Vidhyasaharan Sethu&lt;br&gt;&lt;br&gt;13.30-13.50 - Spoken Language Identification using LSTM-based Angular Proximity&lt;br&gt;&lt;small&gt;Gregory Gelly; Jean-Luc Gauvain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - End-to-End Language Identification Using High-Order Utterance Representation with Bilinear Pooling&lt;br&gt;&lt;small&gt;Ma Jin; Yan Song; Ian McLoughlin; Wu Guo; Lirong Dai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Dialect Recognition Based on Unsupervised Bottleneck Features&lt;br&gt;&lt;small&gt;Qian Zhang; John H.L. 
Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Investigating Scalability in Hierarchical Language Identification System&lt;br&gt;&lt;small&gt;Saad Irtza; Vidhyasaharan Sethu; Eliathamby Ambikairajah; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Improving Sub-phone Modeling for Better Native Language Identification with Non-native English Speech&lt;br&gt;&lt;small&gt;Yao Qian; Keelan Evanini; Xinhao Wang; David Suendermann-Oeft; Robert A Pugh; Patrick L Lange; Hillary R Molloy; Frank K Soong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - QMDIS: QCRI-MIT Advanced Dialect Identification System&lt;br&gt;&lt;small&gt;Sameer Khurana; Maryam Najafian; Ahmed Ali; Tuka Al Hanai; Yonatan Belinkov; James Glass&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Language Recognition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4357\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 5\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"11\"\n            data-room-id=\"1070\"\n            data-room-name=\"E306\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"13.30-15.30 - A Thematicity-based Prosody Enrichment Tool for 
CTS&lt;br&gt;&lt;small&gt;Monica Dominguez; Mireia Farr\u00fas; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Creating a Voice for MiRo, the World\u2019s First Commercial Biomimetic Robot&lt;br&gt;&lt;small&gt;Roger Moore; Ben Mitchinson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - SIAK - A Game for Foreign Language Pronunciation Learning&lt;br&gt;&lt;small&gt;Reima Karhila; Sari Ylinen; Seppo Enarvi; Kalle Palom\u00e4ki; Aleksander Nikulin; Olli Rantula; Vertti Viitanen; Krupakar Dhinakaran; Anna-Riikka Smolander; Heini Kallio; Maria Uther; Katja Junttila; Perttu H\u00e4m\u00e4l\u00e4inen; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - TBT(Toolkit to Build TTS): A High Performance Framework to build Multiple Language HTS Voice&lt;br&gt;&lt;small&gt;Atish Ghone; Rachana Nerpagar; Pranaw Kumar; Arun Baby; Aswin Shanmugam; Sasikumar Mukundan; Hema Murthy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - WebSubDub - Experimental system for creating high-quality alternative audio track for TV broadcasting&lt;br&gt;&lt;small&gt;Martin Gr\u016fber; Jindrich Matousek; Zden\u011bk Hanzl\u00ed\u010dek; Jakub V\u00edt; Daniel Tihelka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Voice Conservation and TTS System for People Facing Total Laryngectomy&lt;br&gt;&lt;small&gt;Mark\u00e9ta J\u016fzov\u00e1; Daniel Tihelka; Jindrich Matousek; Zdenek Hanzlicek&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 5<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                
<div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4321\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speaker Comparison for Forensic and Investigative Applications 3\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"2\"\n            data-room-id=\"1072\"\n            data-room-name=\"B3\"\n            data-day=\"3\"\n            data-abs-path=\"\/abs\/4321.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Special event\"\n            data-category-ids=\"1064\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speaker Comparison for Forensic and Investigative Applications 3<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4338\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Lexical and Pronunciation Modeling\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            
data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Izhak Shafran; Helen Meng&lt;br&gt;&lt;br&gt;13.30-13.50 - Multitask Sequence-to-Sequence Models for Grapheme-to-Phoneme Conversion&lt;br&gt;&lt;small&gt;Benjamin Milde; Christoph Schmidt; Joachim K\u00f6hler&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Acoustic data-driven lexicon learning based on a greedy pronunciation selection framework&lt;br&gt;&lt;small&gt;Xiaohui Zhang; Vimal Manohar; Dan Povey; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Semi-Supervised Learning of a Pronunciation Dictionary from Disjoint Phonemic Transcripts and Text&lt;br&gt;&lt;small&gt;Takahiro Shinozaki; Shinji Watanabe; Daichi Mochihashi; Graham Neubig&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Improved subword modeling for WFST-based speech recognition&lt;br&gt;&lt;small&gt;Peter Smit; Sami Virpioja; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Pronunciation learning with RNN-transducers&lt;br&gt;&lt;small&gt;Antoine Bruguier; Danushen Gnanapragasam; Leif Johnson; Kanishka Rao; Francoise Beaufays&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Learning Similarity Functions for Pronunciation Variations&lt;br&gt;&lt;small&gt;Einat Naaman; Yossi Adi; Joseph Keshet&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Architecture, Search, and Linguistic Components\"\n            data-category-ids=\"1061\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Lexical and Pronunciation Modeling<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            
data-id=\"4362\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Computational Models in Child Language Acquisition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"14\"\n            data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Alejandrina Cristia; Kristina Nilsson Bj\u00f6rkenstam&lt;br&gt;&lt;br&gt;13.30-13.50 - Relating unsupervised word segmentation to reported vocabulary acquisition&lt;br&gt;&lt;small&gt;Elin Larsen; Alejandrina Cristia; Emmanuel Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Approximating phonotactic input in children\u2019s linguistic environments from orthographic transcripts&lt;br&gt;&lt;small&gt;Sofia Str\u00f6mbergsson; Jens Edlund; Jana G\u00f6tze; Kristina Nilsson Bj\u00f6rkenstam&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Computational simulations of temporal vocalization behavior in adult-child interaction&lt;br&gt;&lt;small&gt;Ellen Marklund; David Pagmar; Tove Gerholm; Lisa Gustavsson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Modelling the Informativeness of Non-Verbal Cues in Parent\u2013Child Interaction&lt;br&gt;&lt;small&gt;Mats Wir\u00e9n; Kristina Nilsson Bj\u00f6rkenstam; Robert \u00d6stling&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Learning weakly-supervised multimodal phoneme embeddings&lt;br&gt;&lt;small&gt;Rahma Chaabouni; Ewan Dunbar; Neil Zeghidour; Emmanuel Dupoux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Multi-Task Learning for Mispronunciation Detection on Singapore Children\u2019s Mandarin Speech&lt;br&gt;&lt;small&gt;Rong Tong; Nancy Chen; Bin Ma&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, 
Production and Acquisition\"\n            data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Computational Models in Child Language Acquisition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4336\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Topic Spotting, Entity Extraction and Semantic Analysis\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Ville Hautamaki; Lin-shan Lee&lt;br&gt;&lt;br&gt;13.30-13.50 - Towards Zero-Shot Frame Semantic Parsing for Domain Scaling&lt;br&gt;&lt;small&gt;Ankur Bapna; Gokhan Tur; Dilek Hakkani-Tur; Larry Heck&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - ClockWork-RNN based architectures for Slot Filling&lt;br&gt;&lt;small&gt;Despoina Georgiadou; Vassilios Diakoloukas; Vassilios Tsiaras; Vassilios Digalakis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Investigating the Effect of ASR tuning on Named Entity Recognition&lt;br&gt;&lt;small&gt;Mohamed Ben Jannet; Olivier Galibert; Martine Adda-Decker; Sophie Rosset&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Label-dependency coding in Simple Recurrent Networks for Spoken Language 
Understanding&lt;br&gt;&lt;small&gt;Marco Dinarelli; Vedran Vukotic; Christian Raymond&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Minimum Semantic Error Cost Training of Deep Long Short-Term Memory Networks for Topic Spotting on Conversational Speech&lt;br&gt;&lt;small&gt;Zhong Meng; Biing-Hwang (Fred) Juang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Topic Identification for Speech without ASR&lt;br&gt;&lt;small&gt;Chunxi Liu; Jan Trmal; Matthew Wiesner; Craig Harman; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n            data-category-ids=\"1053\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Topic Spotting, Entity Extraction and Semantic Analysis<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4337\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Dialog Systems\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Gabriel Skantze; Timo Baumann&lt;br&gt;&lt;br&gt;13.30-13.50 - An End-to-End Trainable Neural Network Model with Belief Tracking for Task-Oriented 
Dialog&lt;br&gt;&lt;small&gt;Bing Liu; Ian Lane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Deep Reinforcement Learning of Dialogue Policies with Less Weight Updates&lt;br&gt;&lt;small&gt;Heriberto Cuayahuitl; Seunghak Yu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Towards End-to-End Spoken Dialogue Systems with Turn Embeddings&lt;br&gt;&lt;small&gt;Ali Orkan Bayer; Evgeny Stepanov; Giuseppe Riccardi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Speech and Text Analysis for Multimodal Addressee Detection in Human-Human-Computer Interaction&lt;br&gt;&lt;small&gt;Oleg Akhtiamov; Maxim Sidorov; Alexey Karpov; Wolfgang Minker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Rushing to Judgement: How Do Laypeople Rate Caller Engagement in Thin-Slice Videos of Human--Machine Dialog?&lt;br&gt;&lt;small&gt;Vikram Ramanarayanan; Chee Wee (Ben) Leong; David Suendermann-Oeft&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Hyperarticulation of Corrections in Multilingual Dialogue Systems&lt;br&gt;&lt;small&gt;Ivan Kraljevski; Diane Hirschfeld&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n            data-category-ids=\"1051\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Dialog Systems<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4358\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 6\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            
data-time=\"13:30-15:30\"\n            data-room=\"12\"\n            data-room-id=\"1071\"\n            data-room-name=\"E397\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"13.30-15.30 - A Robust Medical Speech-to-Speech\/Speech-to-Sign Phraselator&lt;br&gt;&lt;small&gt;Farhia Ahmed; Pierrette Bouillon; Chelle Destefano; Johanna Gerlach; Sonia Halimi; Angela Hooper; Manny Rayner; Herv\u00e9 Spechbach; Irene Strasly; Nikos Tsourakis&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Integrating the Talkamatic Dialogue Manager with Alexa&lt;br&gt;&lt;small&gt;Staffan Larsson; Fredrik Kronlid; Andreas Krona; Alex Berman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Modifying Amazon\u2019s Alexa ASR Grammar and Lexicon \u2013 A Case Study&lt;br&gt;&lt;small&gt;Aman Kumar; Hassan Alam; Manan Vyas; Tina Werner; Rachmat Hartono&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Nora the Empathetic Psychologist&lt;br&gt;&lt;small&gt;Genta Indra Winata; Onno Kampman; Yang Yang; Anik Dey; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Towards an Autarkic Embedded Cognitive User Interface&lt;br&gt;&lt;small&gt;Frank Duckhorn; Markus Huber; Werner Meyer; Oliver Jokisch; Constanze Tsch\u00f6pe; Matthias Wolff&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 6<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute 
!important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4333\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Cognition and Brain Studies\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Odette Scharenborg; Isabel Trancoso&lt;br&gt;&lt;br&gt;13.30-13.50 - An entrained rhythm's frequency, not phase, influences temporal sampling of speech&lt;br&gt;&lt;small&gt;Hans Rutger Bosker; Anne K\u00f6sem&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Context regularity indexed by auditory N1 and P2 event-related potentials&lt;br&gt;&lt;small&gt;Xiao Wang; Yanhui Zhang; Gang Peng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Discovering Language in Marmoset Vocalization&lt;br&gt;&lt;small&gt;Sakshi Verma; Lok Prateek Kotha; Karthik Pandia D S; Nauman Dawalatabad; Rogier Landman; Jitendra Sharma; Mriganka Sur; Hema Murthy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Subject-independent Classification of Japanese Spoken Sentences by Multiple Frequency Bands Phase Pattern of EEG Response during Speech Perception&lt;br&gt;&lt;small&gt;Hiroki Watanabe; Hiroki Tanaka; Sakriani Sakti; Satoshi Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - The phonological status of the French Initial Accent and its role in semantic processing: an Event-Related Potentials study&lt;br&gt;&lt;small&gt;Noemie te Rietmolen; Radouane El Yagoubi; Alain Ghio; Corine Ast\u00e9sano&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - A Neuro-Experimental 
Evidence for the Motor Theory of Speech Perception&lt;br&gt;&lt;small&gt;Bin Zhao; Jianwu Dang; Gaoyan Zhang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Perception, Production and Acquisition\"\n            data-category-ids=\"1055\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Cognition and Brain Studies<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5259\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Refreshments\"\n            data-abs-nbr=\"\"\n            data-ystart=\"15.5\"\n            data-yend=\"16\"\n            data-time=\"15:30-16:00\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Refreshments<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  15:30-16:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4342\"\n            
data-project=\"project_242_2017_01_12\"\n            data-title=\"Multi-channel Speech Enhancement\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hynek Boril; Reinhold Haeb-Umbach&lt;br&gt;&lt;br&gt;16.00-16.20 - Tight integration of spatial and spectral features for BSS with Deep Clustering embeddings&lt;br&gt;&lt;small&gt;Lukas Drude; Reinhold Haeb-Umbach&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Speaker-aware neural network based beamformer for speaker extraction in speech mixtures&lt;br&gt;&lt;small&gt;Katerina Zmolikova; Marc Delcroix; Keisuke Kinoshita; Takuya Higuchi; Atsunori Ogawa; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Eigenvector-based Speech Mask Estimation using Logistic Regression&lt;br&gt;&lt;small&gt;Lukas Pfeifenberger; Matthias Z\u00f6hrer; Franz Pernkopf&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Real-time Speech Enhancement with GCC-NMF&lt;br&gt;&lt;small&gt;Sean Wood; Jean Rouat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Coherence-based dual-channel noise reduction algorithm in a complex noisy environment&lt;br&gt;&lt;small&gt;Youna Ji; Jun Byun; Young-cheol Park&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Glottal Model Based Speech Beamforming for Ad-Hoc Microphone Arrays&lt;br&gt;&lt;small&gt;Yang Zhang; Dinei Florencio; Mark Hasegawa-Johnson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Coding and Enhancement\"\n            data-category-ids=\"1060\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                        
        <span>Multi-channel Speech Enhancement<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4354\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Voice Conversion 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"19\"\n            data-room-id=\"1068\"\n            data-room-name=\"Poster 4\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Chandra Sekhar Seelamantula&lt;br&gt;&lt;br&gt;16.00-18.00 - CAB: An Energy-Based Speaker Clustering Model for Rapid Adaptation in Non-Parallel Voice Conversion&lt;br&gt;&lt;small&gt;Toru Nakashika&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Denoising Recurrent Neural Network for Deep Bidirectional LSTM based Voice Conversion&lt;br&gt;&lt;small&gt;Jie Wu; Dongyan Huang; Lei Xie; Haizhou Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Emotional Voice Conversion with Adaptive Scales F0 based on Wavelet Transform using Limited Amount of Emotional Data&lt;br&gt;&lt;small&gt;Zhaojie Luo; Jinhui Chen; Tetsuya Takiguchi; Yasuo Ariki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Generative adversarial network-based glottal waveform model for statistical parametric speech synthesis&lt;br&gt;&lt;small&gt;Bajibabu Bollepalli; Lauri Juvela; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Generative Adversarial Network-based Postfilter for STFT Spectrograms&lt;br&gt;&lt;small&gt;Takuhiro Kaneko; Shinji Takaki; Hirokazu Kameoka; Junichi 
Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Phoneme-Discriminative Features for Dysarthric Speech Conversion&lt;br&gt;&lt;small&gt;Ryo Aihara; Tetsuya Takiguchi; Yasuo Ariki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Segment Level Voice Conversion with Recurrent Neural Networks&lt;br&gt;&lt;small&gt;Miguel Ramos; Alan W Black; Ram\u00f3n Astudillo; Isabel Trancoso; Nuno Fonseca&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speaker adaptation in DNN-based speech synthesis using d-vectors&lt;br&gt;&lt;small&gt;Rama Sanand Doddipatla; Norbert Braunschweiler; Ranniery Maia&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speaker Dependent Approach for Enhancing a Glossectomy Patient's Speech via GMM-based Voice Conversion&lt;br&gt;&lt;small&gt;Kei Tanaka; Sunao Hara; Masanobu Abe; Masaaki Sato; Shogo Minagi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Spectro-Temporal Modelling with Time-Frequency LSTM and Structured Output Layer for Voice Conversion&lt;br&gt;&lt;small&gt;Runnan Li; Zhiyong Wu; Yishuang Ning; Lifa Sun; Helen Meng; Lianhong Cai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Voice Conversion from Unaligned Corpora Using Variational Autoencoding Wasserstein Generative Adversarial Networks&lt;br&gt;&lt;small&gt;Chin-Cheng Hsu; Hsin-Te Hwang; YICHIAO WU; Yu Tsao; Hsin-Min Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Synthesis and Spoken Language Generation\"\n            data-category-ids=\"1059\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Voice Conversion 2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div 
class=\"event \"\n            data-id=\"4340\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Language models for ASR\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Yannick Est\u00e8ve; Dilek Hakkani-T\u00fcr&lt;br&gt;&lt;br&gt;16.00-16.20 - Effectively Building Tera Scale MaxEnt Language Models Incorporating Non-Linguistic Signals&lt;br&gt;&lt;small&gt;Fadi Biadsy; Mohammadreza Ghodsi; Diamantino Caseiro&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Semi-supervised Adaptation of RNNLMs by Fine-tuning with Domain-specific Auxiliary Features&lt;br&gt;&lt;small&gt;Salil Deena; Raymond W. M. Ng; Pranava Madhyastha; Lucia Specia; Thomas Hain&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Approximated and domain-adapted LSTM language models for first-pass decoding in speech recognition&lt;br&gt;&lt;small&gt;Mittul Singh; Youssef Oualil; Dietrich Klakow&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Sparse Non-negative Matrix Language Modeling: Maximum Entropy Flexibility on the Cheap&lt;br&gt;&lt;small&gt;Ciprian Chelba; Diamantino Caseiro; Fadi Biadsy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Multi-scale Context Adaptation for Improving Child Automatic Speech Recognition in Child-Adult Spoken Interactions&lt;br&gt;&lt;small&gt;Manoj Kumar; Daniel Bone; Kelly McWilliams; Shanna Williams; Thomas Lyon; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Using Knowledge Graph And Search Query Click Logs in Statistical Language Model For Speech Recognition&lt;br&gt;&lt;small&gt;Weiwu Zhu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            
data-category=\"Speech Recognition: Architecture, Search, and Linguistic Components\"\n            data-category-ids=\"1061\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Language models for ASR<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4339\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speaker Database and Anti-spoofing\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Nicholas Evans; Karthika Vijayan&lt;br&gt;&lt;br&gt;16.00-16.20 - Detection of Replay Attacks using Single Frequency Filtering Cepstral Coefficients&lt;br&gt;&lt;small&gt;K N R K Raju Alluri; Sivanand Achanta; Sudarsana Reddy Kadiri; Suryakanth V Gangashetty; Anil Kumar Vuppala&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Unsupervised Representation Learning Using Convolutional Restricted Boltzmann Machine for Spoof Speech Detection&lt;br&gt;&lt;small&gt;Hardik Sailor; Madhu Kamble; Hemant Patil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Independent Modelling of High and Low Energy Speech Frames for Spoofing Detection&lt;br&gt;&lt;small&gt;Gajan Suthokumar; Kaavya Sriskandaraja; Vidhyasaharan Sethu; Chamith Wijenayake; Eliathamby 
Ambikairajah&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Improving Speaker Verification Performance in Presence of Spoofing Attacks Using Out-of-Domain Spoofed Data&lt;br&gt;&lt;small&gt;Achintya Sarkar; Md Sahidullah; Zheng-Hua Tan; Tomi Kinnunen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - VoxCeleb: A large-scale speaker identification dataset&lt;br&gt;&lt;small&gt;Arsha Nagrani; Joon Son Chung; Andrew Zisserman&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Call My Net Corpus: A Multilingual Corpus for Evaluation of Speaker Recognition Technology&lt;br&gt;&lt;small&gt;Karen Jones; Stephanie Strassel; Kevin Walker; David Graff; Jonathan Wright&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speaker Database and Anti-spoofing<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4353\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Language Understanding and Generation\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"18\"\n            data-room-id=\"1069\"\n            data-room-name=\"Poster 3\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Jose David Lopes; Heriberto Cuayahuitl&lt;br&gt;&lt;br&gt;16.00-18.00 - ASR 
error management for improving spoken language understanding&lt;br&gt;&lt;small&gt;Edwin Simonnet; Sahar Ghannay; Nathalie Camelin; Yannick Est\u00e8ve; Renato de Mori&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Character-based Embedding Models and Reranking Strategies for Understanding Natural Language Meal Descriptions&lt;br&gt;&lt;small&gt;Mandy Korpusik; Zachary Collins; James Glass&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Internal Memory Gate for Recurrent Neural Networks with Application to Spoken Language Understanding&lt;br&gt;&lt;small&gt;Mohamed Morchid&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Jointly Trained Sequential Labeling and Classification by Sparse Attention Neural Networks&lt;br&gt;&lt;small&gt;Mingbo Ma; Kai Zhao; Liang Huang; Bing Xiang; Bowen Zhou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Online adaptation of an attention-based neural network for natural language generation&lt;br&gt;&lt;small&gt;Matthieu Riou; Bassam Jabaian; St\u00e9phane Huet; Fabrice Lefevre&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - OpenMM: An Open-source Multimodal Feature Extraction Tool&lt;br&gt;&lt;small&gt;Michelle Morales; Stefan Scherer; Rivka Levitan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Parallel Hierarchical Attention Networks with Shared Memory Reader for Multi-Stream Conversational Document Classification&lt;br&gt;&lt;small&gt;Naoki Sawada; Ryo Masumura; Hiromitsu Nishizaki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Quaternion Denoising Encoder-Decoder for Theme Identification of Telephone Conversations&lt;br&gt;&lt;small&gt;Titouan Parcollet; Mohamed Morchid; Georges Linares&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Spanish Sign Language Recognition with Different Topology Hidden Markov Models&lt;br&gt;&lt;small&gt;Carlos-D. 
Mart\u00ednez-Hinarejos; Zuzanna Parcheta&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speaker Dependency Analysis, Audiovisual Fusion Cues and A Multimodal BLSTM for Conversational Engagement Recognition&lt;br&gt;&lt;small&gt;Yuyun Huang; Emer Gilmartin; Nick Campbell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - To Plan or not to Plan? Discourse planning in slot-value informed sequence to sequence models for language generation&lt;br&gt;&lt;small&gt;Neha Nayak; Dilek Hakkani-Tur; Marilyn Walker; Larry Heck&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Zero-shot Learning for Natural Language Understanding using Domain-Independent Sequential Structure and Question Types&lt;br&gt;&lt;small&gt;Kugatsu Sadamitsu; Yukinori Homma; Ryuichiro Higashinaka; Yoshihiro Matsuo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Dialog Systems and Analysis of Conversation\"\n            data-category-ids=\"1051\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Language Understanding and Generation<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4341\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Translation\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            
data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Nicholas Ruiz; Roland Kuhn&lt;br&gt;&lt;br&gt;16.00-16.20 - Sequence-to-Sequence Models Can Directly Translate Foreign Speech&lt;br&gt;&lt;small&gt;Ron Weiss; Jan Chorowski; Navdeep Jaitly; Yonghui Wu; Zhifeng Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Structured-based Curriculum Learning for End-to-end English-Japanese Speech Translation&lt;br&gt;&lt;small&gt;Takatomo Kano; Sakriani Sakti; Satoshi Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Assessing the tolerance of Neural Machine Translation systems against Speech Recognition Errors&lt;br&gt;&lt;small&gt;Nicholas Ruiz; Mattia Antonino Di Gangi; Nicola Bertoldi; Marcello Federico&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - Toward Expressive Speech Translation: A Unified Sequence-to-Sequence LSTMs Approach for Translating Words and Emphasis&lt;br&gt;&lt;small&gt;Quoc Truong Do; Sakriani Sakti; Satoshi Nakamura&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - NMT-based Segmentation and Punctuation Insertion for Real-time Spoken Language Translation&lt;br&gt;&lt;small&gt;Eunah Cho; Jan Niehues; Alex Waibel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n            data-category-ids=\"1053\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Translation<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4343\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech 
Recognition: Applications in Medical Practice\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Phil Green; Torbj\u00f8rn Svendsen&lt;br&gt;&lt;br&gt;16.00-16.20 - Acoustic Assessment of Disordered Voice with Continuous Speech Based on Utterance-level ASR Posterior Features&lt;br&gt;&lt;small&gt;Yuanyuan Liu; Tan Lee; P.C. Ching; Thomas K.T. Law; Kathy Y.S. Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.20-16.40 - Multi-Stage DNN Training for Automatic Recognition of Dysarthric Speech&lt;br&gt;&lt;small&gt;Emre Yilmaz; Mario Ganzeboom; Catia Cucchiarini; Helmer Strik&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.40-17.00 - Improving child speech disorder assessment by incorporating out-of-domain adult speech&lt;br&gt;&lt;small&gt;Daniel Smith; Alex Sneddon; Lauren Ward; Andreas Duenser; Jill Freyne; David Silvera-Tawil; Angela Morgan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.00-17.20 - On Improving Acoustic Models For TORGO Dysarthric Speech Database&lt;br&gt;&lt;small&gt;Neethu Mariam Joy; Srinivasan Umesh; Basil Abraham&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.20-17.40 - Glottal Source Features for Automatic Speech-based Depression Assessment&lt;br&gt;&lt;small&gt;Olympia Simantiraki; Paulos Charonyktakis; Anastasia Pampouchidou; Manolis Tsiknakis; Martin Cooke&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Speech Processing Approach for Diagnosing Dementia in an Early Stage&lt;br&gt;&lt;small&gt;Roozbeh Sadeghian; J. 
David Schaffer; Stephen Zahorian&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Technologies and Systems for New Applications\"\n            data-category-ids=\"1066\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Recognition: Applications in Medical Practice<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4352\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speaker States and Traits\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"17\"\n            data-room-id=\"1067\"\n            data-room-name=\"Poster 2\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Emily Provost&lt;br&gt;&lt;br&gt;16.00-18.00 - An Investigation of Emotion Dynamics and Kalman Filtering for Speech-based Emotion Prediction&lt;br&gt;&lt;small&gt;Zhaocheng Huang; Julien Epps&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Approaching Human Performance in Behavior Estimation in Couples Therapy Using Deep Sentence Embeddings&lt;br&gt;&lt;small&gt;Shao-Yen Tseng; Brian Baucom; Panayiotis Georgiou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Attention Networks for Modeling Behavior in Addiction Counseling&lt;br&gt;&lt;small&gt;James Gibson; Dogan Can; Panayiotis Georgiou; David Atkins; Shrikanth 
Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Big Five vs. Prosodic Features as Cues to Detect Abnormality in SSPNET-Personality Corpus&lt;br&gt;&lt;small&gt;C\u00e9dric Fayet; Arnaud Delhay; Damien Lolive; Pierre-Francois Marteau&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Bilingual Word Embeddings for Cross-Lingual Personality Recognition Using Convolutional Neural Nets&lt;br&gt;&lt;small&gt;Farhad Bin Siddique; Pascale Fung&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Complexity in speech and its relation to emotional bond in therapist-patient interactions during suicide risk assessment interviews&lt;br&gt;&lt;small&gt;Md Nasir; Brian Baucom; Craig J. Bryan; Shrikanth Narayanan; Panayiotis Georgiou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Computational Analysis of Acoustic Descriptors in Psychotic Patients&lt;br&gt;&lt;small&gt;Torsten W\u00f6rtwein; Tadas Baltru\u0161aitis; Eugene Laksana; Luciana Pennant; Elizabeth Liebson; Dost \u00d6ng\u00fcr; Justin Baker; Louis-Philippe Morency&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Emotion category mapping to emotional space by cross-corpus emotion labeling&lt;br&gt;&lt;small&gt;Yoshiko Arimoto; Hiroki Mori&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Implementing gender-dependent vowel-level analysis for boosting speech-based depression recognition&lt;br&gt;&lt;small&gt;Bogdan Vlasenko; Hesam Sagha; Nicholas Cummins; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Modeling Perceivers Neural-Responses using Lobe-dependent Convolutional Neural Network to Improve Speech Emotion Recognition&lt;br&gt;&lt;small&gt;Ya-Tse Wu; Hsuan-Yu Chen; Yu-Hsien Liao; Li-Wei Kuo; Chi-Chun Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Speech Rate Comparison when Talking to a System and Talking to a Human: A study from a Speech-to-Speech, Machine Translation mediated Map Task&lt;br&gt;&lt;small&gt;Akira Hayakawa; Carl Vogel; Saturnino Luz; Nick 
Campbell&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The Perception of Emotions in Noisified Nonsense Speech&lt;br&gt;&lt;small&gt;Emilia Parada-Cabaleiro; Alice Baird; Anton Batliner; Nicholas Cummins; Simone Hantke; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speaker States and Traits<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4363\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Voice Attractiveness\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"14\"\n            data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"16.00-16.10 - Introduction&lt;br&gt;&lt;small&gt;Melissa Barkat-Defradas; John Ohala&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - A gender bias in the acoustic-melodic features of charismatic speech?&lt;br&gt;&lt;small&gt;Eszter Novak-Tot; Oliver Niebuhr; Aoju Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Attractiveness of French voices for German listeners - results from native and non-native read speech&lt;br&gt;&lt;small&gt;Juergen Trouvain; Frank Zimmerer&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Does 
Posh English Sound Attractive?&lt;br&gt;&lt;small&gt;Li Jiao; Chengxia Wang; Cristiane Hsu; Peter Birkholz; Yi Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Large-scale Speaker Ranking from Crowdsourced Pairwise Listener Ratings&lt;br&gt;&lt;small&gt;Timo Baumann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Perceptual Ratings of Voice Likability Collected through In-Lab Listening Tests vs. Mobile-Based Crowdsourcing&lt;br&gt;&lt;small&gt;Laura Fern\u00e1ndez Gallardo; Rafael Zequeira Jim\u00e9nez; Sebastian M\u00f6ller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Personalized Quantification of Voice Attractiveness in Multidimensional Merit Space&lt;br&gt;&lt;small&gt;Yasunari Obuchi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Pitch convergence as an effect of perceived attractiveness and likability&lt;br&gt;&lt;small&gt;Jan Michalsky; Heike Schoormann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - Social Attractiveness in Dialogs&lt;br&gt;&lt;small&gt;Antje Schweitzer; Natalie Lewandowski; Daniel Duran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.10-17.40 - The role of temporal amplitude modulations in the political arena: Hillary Clinton vs. 
Donald Trump&lt;br&gt;&lt;small&gt;Hans Rutger Bosker&lt;\/small&gt;&lt;br&gt;&lt;br&gt;17.40-18.00 - Discussion&lt;br&gt;&lt;small&gt;&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Voice Attractiveness<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4364\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Digital Revolution for Under-resourced Languages 3\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"3\"\n            data-abs-path=\"\/abs\/4364.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Joseph Mariani; Sakriani Sakti&lt;br&gt;&lt;br&gt;&lt;p&gt;Panel discussion with panelists:&lt;\/p&gt;&lt;p&gt;&lt;\/p&gt;&lt;ul&gt;&lt;li&gt;Claudia Soria, SIG ELRA-LRL&lt;br&gt;&lt;\/li&gt;&lt;li&gt;Alexey Karpov, SLTU Board&lt;br&gt;&lt;\/li&gt;&lt;li&gt;Emmanuel Dupoux, Zero Resource Speech Challenge&lt;br&gt;&lt;\/li&gt;&lt;li&gt;Mary Harper, BABEL Program&lt;br&gt;&lt;\/li&gt;&lt;li&gt;Sebastian Stueker, BULB Project&lt;\/li&gt;&lt;li&gt;Sanjeev Khundapur, John-Hopkins Summer Workshops&lt;\/li&gt;&lt;li&gt;Linne Ha, Google 
Research&lt;\/li&gt;&lt;li&gt;Irmgarda Kasinkaite-Buddeberg, UNESCO&lt;\/li&gt;&lt;li&gt;Marja-Liisa Olthuis, North-Europe Representative&lt;\/li&gt;&lt;li&gt;Ulrike Janke, South-Africa Representative&lt;\/li&gt;&lt;li&gt;Shyam Agrawal, South-Asia Representative&lt;\/li&gt;&lt;\/ul&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n            data-category-ids=\"1053\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Digital Revolution for Under-resourced Languages 3<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4351\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Prosody\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"18\"\n            data-time=\"16:00-18:00\"\n            data-room=\"16\"\n            data-room-id=\"1061\"\n            data-room-name=\"Poster 1\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Stefanie Jannedy&lt;br&gt;&lt;br&gt;16.00-18.00 - Canonical Correlation Analysis and Prediction of Perceived Rhythmic Prominences and Pitch Tones in Speech&lt;br&gt;&lt;small&gt;Elizabeth Godoy; James Williamson; Thomas Quatieri&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Creaky voice as a function of tonal categories and prosodic boundaries&lt;br&gt;&lt;small&gt;Jianjing 
Kuang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Evaluation of Spectral Tilt Measures for Sentence Prominence Under Different Noise Conditions&lt;br&gt;&lt;small&gt;Sofoklis Kakouros; Okko R\u00e4s\u00e4nen; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Exploring multidimensionality: Acoustic and articulatory correlates of Swedish word accents&lt;br&gt;&lt;small&gt;Malin Svensson Lundmark; Gilbert Ambrazaitis; Otto Ewald&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Focus Acoustics in Mandarin Nominals&lt;br&gt;&lt;small&gt;Yu-Yin Hsu; Anqi Xu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - How does the absence of shared knowledge between interlocutors affect the production of French prosodic forms?&lt;br&gt;&lt;small&gt;Amandine Michelas; C\u00e9cile Cau; Maud Champagne-Lavau&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Intonation of contrastive topic in Estonian&lt;br&gt;&lt;small&gt;Heete Sahkai; Meelis Mihkla&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Reanalyze Fundamental Frequency Peak Delay in Mandarin&lt;br&gt;&lt;small&gt;Lixia Hao; Wei Zhang; Yanlu Xie; Jinsong Zhang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The Acoustics of Word Stress in Czech as a Function of Speaking Style&lt;br&gt;&lt;small&gt;Radek Skarnitzl; Anders Eriksson&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - The Perception of English Intonation Patterns by German L2 speakers of English&lt;br&gt;&lt;small&gt;Karin Puga; Robert Fuchs; Jane Setter; Peggy Mok&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Three Dimensions of Sentence Prosody and their (Non-)Interactions&lt;br&gt;&lt;small&gt;Michael Wagner; Michael McAuliffe&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Trisyllabic tone 3 sandhi patterns in Mandarin produced by Cantonese speakers&lt;br&gt;&lt;small&gt;Jung-Yueh Tu; Janice Wing-Sze Wong; Jih-Ho Cha&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - Using Prosody to Classify Discourse Relations&lt;br&gt;&lt;small&gt;Janine Kleinhans; Mireia 
Farr\u00fas; Agustin Gravano; Juan Manuel P\u00e9rez; Catherine Lai; Leo Wanner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;16.00-18.00 - What You See Is What You Get Prosodically Less - Visibility Shapes Prosodic Prominence Production in Spontaneous Interaction&lt;br&gt;&lt;small&gt;Petra Wagner; Nataliya Bryhadyr&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Prosody<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-18:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5252\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Banquet\"\n            data-abs-nbr=\"\"\n            data-ystart=\"19\"\n            data-yend=\"23\"\n            data-time=\"19:00-23:00\"\n            data-room=\"21\"\n            data-room-id=\"1081\"\n            data-room-name=\"Tekniska Museet and Etnografiska museet\"\n            data-day=\"3\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Social event\"\n            data-category-ids=\"1067\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Banquet<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Tekniska Museet and Etnografiska museet<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  
19:00-23:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#C9EE91; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5248\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Registration\"\n            data-abs-nbr=\"\"\n            data-ystart=\"7.75\"\n            data-yend=\"17\"\n            data-time=\"07:45-17:00\"\n            data-room=\"23\"\n            data-room-id=\"1112\"\n            data-room-name=\"S\u00f6dra Huset, House A\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"\"\n            data-category-ids=\"\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Registration<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  07:45-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4269\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Keynote 3: Bj\u00f6rn Lindblom, Re-inventing speech \u2013 the biological way\"\n            data-abs-nbr=\"\"\n            data-ystart=\"8.5\"\n            data-yend=\"9.5\"\n            data-time=\"08:30-09:30\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"4\"\n            data-abs-path=\"\/abs\/4269.html\"\n         
   data-speaker=\"Bj\u00f6rn Lindblom\"\n            data-speakercell=\"Bj\u00f6rn Lindblom\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Olov Engwall&lt;br&gt;&lt;br&gt;\n                 &lt;p&gt;The session will also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;br&gt;&lt;\/p&gt;\n                &lt;br&gt;&lt;br&gt;\"\n            data-category=\"Keynote\"\n            data-category-ids=\"1057\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Keynote 3: Bj\u00f6rn Lindblom, Re-inventing speech \u2013 the biological way<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Aula Magna<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  08:30-09:30                <\/span><\/div>\n\n              <div class=\"lecturer\"><span>Bj\u00f6rn Lindblom<\/span><\/div>\n            <\/div>\n\n\n                                <div style=\"background-color:#72D9EE; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5262\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Refreshments\"\n            data-abs-nbr=\"\"\n            data-ystart=\"9.5\"\n            data-yend=\"10\"\n            data-time=\"09:30-10:00\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div 
class=\"event_content\">\n              <div class=\"title\">\n                                <span>Refreshments<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  09:30-10:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4276\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speaker Diarization\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Eduardo Lleida; Kai Yu&lt;br&gt;&lt;br&gt;10.00-10.20 - Speaker Diarization Using Convolutional Neural Network for Statistics Accumulation Refinement&lt;br&gt;&lt;small&gt;Zbynek Zajic; Marek Hruz; Ludek Muller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Speaker2Vec: Unsupervised Learning and Adaptation of a Speaker Manifold using Deep Neural Networks with an Evaluation on Speaker Segmentation&lt;br&gt;&lt;small&gt;Arindam Jati; Panayiotis Georgiou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - A Triplet Ranking-based Neural Network for Speaker Diarization and Linking&lt;br&gt;&lt;small&gt;Ga\u00ebl Le Lan; Delphine Charlet; Anthony Larcher; Sylvain Meignier&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Estimating Speaker Clustering Quality Using Logistic Regression&lt;br&gt;&lt;small&gt;Yishai Cohen; Itshak Lapidot&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Combining speaker turn embedding and incremental structure prediction for low-latency speaker 
diarization&lt;br&gt;&lt;small&gt;Guillaume Wisniewski; Herv\u00e9 Bredin; Gregory Gelly; Claude Barras&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - pyannote.metrics: a toolkit for reproducible evaluation, diagnostic, and error analysis of speaker diarization systems&lt;br&gt;&lt;small&gt;Herv\u00e9 Bredin&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speaker Diarization<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4278\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Noise Reduction\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"5\"\n            data-room-id=\"1066\"\n            data-room-name=\"C6\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Yan Huang; Tim Fingscheidt&lt;br&gt;&lt;br&gt;10.00-10.20 - Deep Recurrent Neural Network based Monaural Speech Separation using Recurrent Temporal Restricted Boltzmann Machines&lt;br&gt;&lt;small&gt;Suman Samui; Indrajit Chakrabarti; Soumya Kanti Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Improved Codebook-based Speech Enhancement based on MBE Model&lt;br&gt;&lt;small&gt;Qizheng Huang; Changchun Bao; Xianyun Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 
- Improving mask learning based speech enhancement system with restoration layers and residual connection&lt;br&gt;&lt;small&gt;Zhuo Chen; Yan Huang; Jinyu Li; Yifan Gong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Exploring Low-Dimensional Structures of Modulation Spectra for Robust Speech Recognition&lt;br&gt;&lt;small&gt;Bi-Cheng Yan; Chin-Hong Shih; Shih-Hung Liu; Berlin Chen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - SEGAN: Speech Enhancement Generative Adversarial Network&lt;br&gt;&lt;small&gt;Santiago Pascual; Antonio Bonafonte; Joan Serr\u00e0&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Concatenative resynthesis using twin networks&lt;br&gt;&lt;small&gt;Soumi Maiti; Michael Mandel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Coding and Enhancement\"\n            data-category-ids=\"1060\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Noise Reduction<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4277\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Spoken Term Detection\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Sanjeev Khudanpur; Murat Saraclar&lt;br&gt;&lt;br&gt;10.00-10.20 - A Rescoring 
Approach for Keyword Search Using Lattice Context Information&lt;br&gt;&lt;small&gt;Zhipeng Chen; Ji Wu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - The Kaldi OpenKWS System: Improving Low Resource Keyword Search&lt;br&gt;&lt;small&gt;Jan Trmal; Matthew Wiesner; Vijayaditya Peddinti; Xiaohui Zhang; Pegah Ghahremani; Vimal Manohar; Yiming Wang; Hainan Xu; Dan Povey; Sanjeev Khudanpur&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - The STC Keyword Search System For OpenKWS 2016 Evaluation&lt;br&gt;&lt;small&gt;Yuri Khokhlov; Ivan Medennikov; Aleksei Romanenko; Valentin Mendelev; Maxim Korenevsky; Alexey Prudnikov; Natalia Tomashenko; Alexander Zatvornitskiy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Compressed time delay neural network for small-footprint keyword spotting&lt;br&gt;&lt;small&gt;Ming Sun; David Snyder; Yixin Gao; Varun Nagaraja; Mike Rodehorst; Sankaran Panchapagesan; Nikko Strom; Spyros Matsoukas; Shiv Vitaladevuni&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Symbol sequence search from telephone conversation&lt;br&gt;&lt;small&gt;Masayuki Suzuki; Gakuto Kurata; Abhinav Sethy; Bhuvana Ramabhadran; Kenneth Church; Mark Drake&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Similarity Learning Based Query Modeling for Keyword Search&lt;br&gt;&lt;small&gt;Batuhan Gundogdu; Murat Saraclar&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n            data-category-ids=\"1053\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Spoken Term Detection<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            
<\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4279\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Recognition: Multimodal Systems\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Patrick Wambacq; Florian Metze&lt;br&gt;&lt;br&gt;10.00-10.20 - Combining Residual Networks with LSTMs for Lipreading&lt;br&gt;&lt;small&gt;Themos Stafylakis; Georgios Tzimiropoulos&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Improving computer lipreading via DNN sequence discriminative training techniques&lt;br&gt;&lt;small&gt;Kwanchiva Thangthai; Richard Harvey&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Improving Speaker-Independent Lipreading with Domain-Adversarial Training&lt;br&gt;&lt;small&gt;Michael Wand; J\u00fcrgen Schmidhuber&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Turbo Decoders for Audio-visual Continuous Speech Recognition&lt;br&gt;&lt;small&gt;Ahmed Hussen Abdelaziz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - DNN-based Ultrasound-to-Speech Conversion for a Silent Speech Interface&lt;br&gt;&lt;small&gt;Tam\u00e1s G\u00e1bor Csap\u00f3; Tam\u00e1s Gr\u00f3sz; G\u00e1bor Gosztolya; L\u00e1szl\u00f3 T\u00f3th; Alexandra Mark\u00f3&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Visually grounded learning of keyword prediction from untranscribed speech&lt;br&gt;&lt;small&gt;Herman Kamper; Shane Settle; Gregory Shakhnarovich; Karen Livescu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Technologies and Systems for New Applications\"\n            
data-category-ids=\"1066\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Recognition: Multimodal Systems<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4280\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Noise Robust and Far-field ASR\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"16\"\n            data-room-id=\"1061\"\n            data-room-name=\"Poster 1\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Volker Leutnant\u00a0&lt;br&gt;&lt;br&gt;10.00-12.00 - Adaptive Multichannel Dereverberation for Automatic Speech Recognition&lt;br&gt;&lt;small&gt;Joe Caroselli; Izhak Shafran; Arun Narayanan; Richard Rose&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Attention-based LSTM with Multi-task Learning for Distant Speech Recognition&lt;br&gt;&lt;small&gt;Yu Zhang; Pengyuan Zhang; Yonghong Yan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - End-to-End Speech Recognition with Auditory Attention for Multi-Microphone Distance Speech Recognition&lt;br&gt;&lt;small&gt;Suyoun Kim; Ian Lane&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Factored deep convolutional neural networks for noise robust speech recognition&lt;br&gt;&lt;small&gt;Masakiyo Fujimoto&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Global SNR Estimation of Speech Signals for Unknown Noise 
Conditions using Noise Adapted Non-linear Regression&lt;br&gt;&lt;small&gt;Pavlos Papadopoulos; Ruchir Travadi; Shrikanth Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Improved Automatic Speech Recognition using Subband Temporal Envelope Features and Time-delay Neural Network Denoising Autoencoder&lt;br&gt;&lt;small&gt;Cong-Thanh Do; Yannis Stylianou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Joint Training of Multi-channel-condition Dereverberation and Acoustic Modeling of Microphone Array Speech for Robust Distant Speech Recognition&lt;br&gt;&lt;small&gt;Fengpei Ge; Kehuang Li; Bo Wu; Sabato Marco Siniscalchi; Yonghong Yan; Chin-Hui Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Robust Speech Recognition Based on Binaural Auditory Processing&lt;br&gt;&lt;small&gt;Anjali Menon; Chanwoo Kim; Richard Stern&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - To Improve the Robustness of LSTM-RNN Acoustic Models Using Higher-order Feedback From Multiple Histories&lt;br&gt;&lt;small&gt;Hengguan Huang; Brian Mak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Uncertainty decoding with adaptive sampling for noise robust DNN-based acoustic modeling&lt;br&gt;&lt;small&gt;Tien Dung Tran; Marc Delcroix; Atsunori Ogawa; Tomohiro Nakatani&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Noise Robust and Far-field ASR<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n  
          data-id=\"4287\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Interspeech 2017 Computational Paralinguistics ChallengE (ComParE) 1\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Bj\u00f6rn Schuller; Anton Batliner&lt;br&gt;&lt;br&gt;10.00-10.15 - The INTERSPEECH 2017 Computational Paralinguistics Challenge: Addressee, Cold &amp; Snoring&lt;br&gt;&lt;small&gt;Bj\u00f6rn Schuller; Stefan Steidl; Anton Batliner; Elika Bergelson; Jarek Krajewski; Christoph Janott; Andrei Amatuni; Marisa  Casillas; Amanda Seidl; Melanie Soderstrom; Anne Warlaumont; Guillermo Hidalgo; Sebastian Schnieder; Clemens Heiser; Winfried Hohenhorst; Michael Herzog; Maximilian Schmitt; Kun Qian; Yue Zhang; George Trigeorgis; Panagiotis Tzirakis; Stefanos Zafeiriou&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.15-10.25 - Description of the UPPER RESPIRATORY TRACT INFECTION CORPUS (URTIC)&lt;br&gt;&lt;small&gt;Jarek Krajewski; Sebastian Schieder; Anton Batliner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.25-10.35 - Description of the Munich-Passau Snore Sound Corpus (MPSSC)&lt;br&gt;&lt;small&gt;Christoph Janott; Anton Batliner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.35-10.45 - Description of the HOMEBANK CHILD\/ADULT ADDRESSEE CORPUS (HB-CHAAC)&lt;br&gt;&lt;small&gt;Elika Bergelson; Andrei Amatuni; Marisa  Casillas; Amanda Seidl; Melanie Soderstorm; Anne Warlaumont  &lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.45-11.00 - It sounds like you have a cold! 
Testing voice features for the Interspeech 2017 Computational Paralinguistics Cold Challenge&lt;br&gt;&lt;small&gt;Mark Huckvale; Andr\u00e1s Beke&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.15 - End-to-End Deep Learning Framework for Speech Paralinguistics Detection Based on Perception Aware Spectrum&lt;br&gt;&lt;small&gt;Danwei Cai; Zhidong Ni; Wenbo Liu; Weicheng Cai; Gang Li; Ming Li&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.15-11.30 - Infected Phonemes: How a Cold Impairs Speech on a Phonetic Level&lt;br&gt;&lt;small&gt;Johannes Wagner; Thiago Fraga-Silva; Yvan Josse; Dominik Schiller; Andreas Seiderer; Elisabeth Andr\u00e9&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.30-11.45 - Phoneme state posteriorgram features for speech based automatic classification of speakers in cold and healthy conditions&lt;br&gt;&lt;small&gt;Akshay Kalkunte Suresh; Srinivasa Raghavan K M; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.45-12.00 - An Integrated Solution for Snoring Sound Classification Using Bhattacharyya Distance based GMM Supervectors with SVM, Feature Selection with Random Forest and Spectrogram with CNN&lt;br&gt;&lt;small&gt;Tin Lay Nwe; Tran Huy Dat; Ng Wen Zheng Terence; Bin Ma&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Interspeech 2017 Computational Paralinguistics ChallengE (ComParE) 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4282\"\n            
data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech Synthesis: Data, Evaluation, and Novel Paradigms\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"19\"\n            data-room-id=\"1068\"\n            data-room-name=\"Poster 4\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;S\u00e9bastien Le Maguer&lt;br&gt;&lt;br&gt;10.00-12.00 - A Neural Parametric Singing Synthesizer&lt;br&gt;&lt;small&gt;Merlijn Blaauw; Jordi Bonada&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - An Expanded Taxonomy of Semiotic Classes for Text Normalization&lt;br&gt;&lt;small&gt;Daan van Esch; Richard Sproat&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Bias and Statistical Significance in Evaluating Speech Synthesis with Mean Opinion Scores&lt;br&gt;&lt;small&gt;Andrew Rosenberg; Bhuvana Ramabhadran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Complex-valued restricted Boltzmann machine for direct learning of frequency spectra&lt;br&gt;&lt;small&gt;Toru Nakashika; Shinji Takaki; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Evaluation of a Silent Speech Interface based on Magnetic Sensing and Deep Learning for a Phonetically Rich Vocabulary&lt;br&gt;&lt;small&gt;Jose A. Gonzalez; Lam A. Cheah; Phil D. Green; James M. Gilbert; Stephen R. 
Ell; Roger Moore; Ed Holdsworth&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Phase Modeling using Integrated Linear Prediction Residual for Statistical Parametric Speech Synthesis.&lt;br&gt;&lt;small&gt;Nagaraj Adiga; S R Mahadeva Prasanna&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Predicting Head Pose from Speech with a Conditional Variational Autoencoder&lt;br&gt;&lt;small&gt;David Greenwood; Stephen Laycock; Iain Matthews&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Principles for learning controllable TTS from annotated and latent variation&lt;br&gt;&lt;small&gt;Gustav Eje Henter; Jaime Lorenzo-Trueba; Xin Wang; Junichi Yamagishi&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Real-time reactive speech synthesis: incorporating interruptions&lt;br&gt;&lt;small&gt;Mirjam Wester; David Braude; Blaise Potard; Matthew Aylett; Francesca Shaw&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Sampling-based speech parameter generation using moment-matching networks&lt;br&gt;&lt;small&gt;Shinnosuke Takamichi; Tomoki Koriyama; Hiroshi Saruwatari&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Siri On-Device Deep Learning-Guided Unit Selection Text-to-Speech System&lt;br&gt;&lt;small&gt;Tim Capes; Paul Coles; Alistair Conkie; Ladan Golipour; Abie Hadjitarkhani; Qiong Hu; Nancy Huddleston; Melvyn Hunt; Jiangchuan Li; Matthias Neeracher; Kishore Prahallad; Tuomo Raitio; Ramya Rasipuram; Greg Townsend; Becci Williamson; David Winarsky; Zhizheng Wu; Hepeng Zhang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Tacotron: Towards End-To-End Speech Synthesis&lt;br&gt;&lt;small&gt;Yuxuan Wang; RJ Skerry-Ryan; Daisy Stanton; Yonghui Wu; Ron Weiss; Navdeep Jaitly; Zongheng Yang; Ying Xiao; Zhifeng Chen; Samy Bengio; Quoc Le; Yannis Agiomyrgiannakis; Rob Clark; Rif A. 
Saurous&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Unit selection with Hierarchical Cascaded Long Short Term Memory Bidirectional Recurrent Neural Nets&lt;br&gt;&lt;small&gt;Vincent Pollet; Enrico Zovato; Sufian Irhimeh; Pier Batzu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Utterance Selection for Optimizing Intelligibility of TTS Voices Trained on ASR Data&lt;br&gt;&lt;small&gt;Erica Cooper; Xinyue Wang; Alison Chang; Yocheved Levitan; Julia Hirschberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Synthesis and Spoken Language Generation\"\n            data-category-ids=\"1059\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech Synthesis: Data, Evaluation, and Novel Paradigms<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4275\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Discriminative Training for ASR\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Hagen Soltau; William  Hartmann&lt;br&gt;&lt;br&gt;10.00-10.20 - Multitask Learning with Low-Level Auxiliary Tasks for Encoder-Decoder Based Speech Recognition&lt;br&gt;&lt;small&gt;Shubham Toshniwal; Hao Tang; Liang Lu; Karen 
Livescu&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - Optimizing expected word error rate via sampling for speech recognition&lt;br&gt;&lt;small&gt;Matt Shannon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - Annealed F-smoothing as a Mechanism to Speed up Neural Network Training&lt;br&gt;&lt;small&gt;Tara Sainath; Vijay Peddinti; Olivier Siohan; Arun Narayanan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Non-Uniform MCE Training of Deep Long Short-Term Memory Recurrent Neural Networks for Keyword Spotting&lt;br&gt;&lt;small&gt;Zhong Meng; Biing-Hwang (Fred) Juang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - Exploiting Eigenposteriors for Semi-supervised Training of DNN Acoustic Models with Sequence Discrimination&lt;br&gt;&lt;small&gt;Pranay Dighe; Afsaneh Asaei; Herve Bourlard&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Discriminative Autoencoders for Acoustic Modeling&lt;br&gt;&lt;small&gt;Ming-Han Yang; Hung-Shin Lee; Yu-Ding Lu; Kuan-Yu Chen; Yu Tsao; Berlin Chen; Hsin-Min Wang&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Discriminative Training for ASR<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4288\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: State of the Art in Physics-based Voice Simulation\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            
data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"14\"\n            data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Sten Ternstr\u00f6m; Oriol Guasch&lt;br&gt;&lt;br&gt;10.00-10.20 - Acoustic analysis of detailed three-dimensional shape of the human nasal cavity and paranasal sinuses&lt;br&gt;&lt;small&gt;Tatsuya Kitamura; Hironori Takemoto; Hisanori Makinae; Tetsutaro Yamaguchi; Kotaro Maki&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.20-10.40 - A semi-polar grid strategy for the three-dimensional finite element simulation of vowel-vowel sequences&lt;br&gt;&lt;small&gt;Marc Arnela; Saeed Dabbaghchian; Oriol Guasch; Olov Engwall&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.40-11.00 - A Fast Robust 1D Flow Model for a Self-Oscillating Coupled 2D FEM Vocal Fold Simulation&lt;br&gt;&lt;small&gt;Arvind Vasudevan; Victor Zappi; Peter Anderson; Sidney Fels&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.00-11.20 - Waveform patterns in pitch glides near a vocal tract resonance&lt;br&gt;&lt;small&gt;Tiina Murtola; Jarmo Malinen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.20-11.40 - A unified numerical simulation of vowel production that comprises phonation and the emitted sound&lt;br&gt;&lt;small&gt;Niyazi Cem Degirmenci; Johan Jansson; Johan Hoffman; Marc Arnela; Patricia Sanchez-Martin; Oriol Guasch; Pr. 
Sten Ternstr\u00f6m&lt;\/small&gt;&lt;br&gt;&lt;br&gt;11.40-12.00 - Synthesis of VV Utterances from Muscle Activation to Sound with a 3D Model&lt;br&gt;&lt;small&gt;Saeed Dabbaghchian; Marc Arnela; Olov Engwall; Oriol Guasch&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Synthesis and Spoken Language Generation\"\n            data-category-ids=\"1059\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: State of the Art in Physics-based Voice Simulation<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4281\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Styles, Varieties, Forensics and Tools\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"18\"\n            data-room-id=\"1069\"\n            data-room-name=\"Poster 3\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Kiyoko Yoneyama&lt;br&gt;&lt;br&gt;10.00-12.00 - Automatic Labelling of Prosodic Prominence, Phrasing and Disfluencies in French Speech by Simulating the Perception of Nai\u0308ve and Expert Listeners&lt;br&gt;&lt;small&gt;George Christodoulides; Mathieu Avanzi; Anne Catherine Simon&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Cross-linguistic Distinctions between Professional and Non-Professional Speaking Styles&lt;br&gt;&lt;small&gt;Plinio Barbosa; Sandra 
Madureira; Philippe Boula de Mare\u00fcil&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Developing an Embosi (Bantu C25) Speech Variant Dictionary to Model Vowel Elision and Morpheme Deletion&lt;br&gt;&lt;small&gt;Jamison Cooper-Leavitt; Lori Lamel; Annie Rialland; Martine Adda-Decker; Gilles Adda&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Don't Count on ASR to Transcribe for You: Breaking Bias with Two Crowds&lt;br&gt;&lt;small&gt;Michael Levit; Yan Huang; Shuangyu Chang; Yifan Gong&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Effect of Language, Speaking Style and Speaker on Long-term F0 Estimation&lt;br&gt;&lt;small&gt;Pablo Arantes; Anders Eriksson; Suska Gutzeit&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Effects of training data variety in generating glottal pulses from acoustic features with DNNs&lt;br&gt;&lt;small&gt;Manu Airaksinen; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Electrophysiological correlates of familiar voice recognition&lt;br&gt;&lt;small&gt;Julien Plante-Hebert; Victor Boucher; Boutheina Jemel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Glottal source estimation from coded telephone speech using a deep neural network&lt;br&gt;&lt;small&gt;Narendra N P; Manu Airaksinen; Paavo Alku&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Mapping across feature spaces in forensic voice comparison: the contribution of auditory-based voice quality to (semi-)automatic system testing&lt;br&gt;&lt;small&gt;Vincent Hughes; Philip Harrison; Paul Foulkes; Peter French; Colleen Kavanagh; Eugenia San Segundo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Perception and production of word-final \/\u0281\/ in broadcast and spontaneous French&lt;br&gt;&lt;small&gt;Cedric Gendrot&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Polyglot and Speech Corpus Tools: a system for representing, integrating, and querying speech corpora&lt;br&gt;&lt;small&gt;Michael McAuliffe; Elias Stengel-Eskin; Michaela Socolof; Morgan 
Sonderegger&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Rd as a control parameter to explore affective correlates of the tense-lax continuum&lt;br&gt;&lt;small&gt;Andy Murphy; Irena Yanushevskaya; Ailbhe N\u00ed Chasaide; Christer Gobl&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Stability of prosodic characteristics across age and gender groups&lt;br&gt;&lt;small&gt;Jan Vol\u00edn; Tereza Tykalova; Tom\u00e1\u0161 Bo\u0159il&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - The effects of real and placebo alcohol on deaffrication&lt;br&gt;&lt;small&gt;Urban Zihlmann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Towards Intelligent Crowdsourcing for Audio Data Annotation: Integrating Active Learning in the Real World&lt;br&gt;&lt;small&gt;Simone Hantke; Zixing Zhang; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Styles, Varieties, Forensics and Tools<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4284\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 7\"\n            data-abs-nbr=\"\"\n            data-ystart=\"10\"\n            data-yend=\"12\"\n            data-time=\"10:00-12:00\"\n            data-room=\"11\"\n            data-room-id=\"1070\"\n            data-room-name=\"E306\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            
data-info=\"10.00-12.00 - Remote articulation test system based on WebRTC&lt;br&gt;&lt;small&gt;Ikuyo Masuda-Katsuse&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Soundtracing for realtime speech adjustment to environmental conditions in 3D simulations&lt;br&gt;&lt;small&gt;Szymon Pa\u0142ka; Tomasz P\u0119dzim\u0105\u017c; Bartosz Ziolko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - The ModelTalker Project: A web-based voice banking pipeline for ALS\/MND patients&lt;br&gt;&lt;small&gt;H Timothy Bunnell; Jason Lilley; Kathleen McGrath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Visible Vowels: a Tool for the Visualization of Vowel Variation&lt;br&gt;&lt;small&gt;Wilbert Heeringa; Hans Van de Velde&lt;\/small&gt;&lt;br&gt;&lt;br&gt;10.00-12.00 - Vocal-tract Model with Static Articulators: Lips, Teeth, Tongue, and More&lt;br&gt;&lt;small&gt;Takayuki Arai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 7<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  10:00-12:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5254\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Lunch\"\n            data-abs-nbr=\"\"\n            data-ystart=\"12\"\n            data-yend=\"13.5\"\n            data-time=\"12:00-13:30\"\n            data-room=\"22\"\n     
       data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Lunch<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  12:00-13:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4285\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Open Doors Event\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13\"\n            data-yend=\"15.5\"\n            data-time=\"13:00-15:30\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"4\"\n            data-abs-path=\"\/abs\/4285.html\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;p&gt;&lt;strong&gt;Location:&lt;\/strong&gt;&amp;nbsp;Tobii Technology and Furhat Robotics&lt;\/p&gt;&lt;p&gt;ISCA-SAC is organising a company visit event on this year\u2019s INTERSPEECH. Students will be given the opportunity to visit the headquarters of Stockholm companies interested in speech communication. The companies will demonstrate their technologies and products and let students try out different equipment, following an open discussion and networking. This year the students will visit Furhat Robotics and Tobii Pro. 
The event will take place on Thursday, August 24 and aims to bring students and researchers together discussing potential collaboration or even possibly hiring opportunities.&lt;\/p&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Special event\"\n            data-category-ids=\"1064\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Open Doors Event<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:00-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4286\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Special Session: Interspeech 2017 Computational Paralinguistics ChallengE (ComParE) 2\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"10\"\n            data-room-id=\"1060\"\n            data-room-name=\"E10\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Bj\u00f6rn Schuller; Anton Batliner&lt;br&gt;&lt;br&gt;13.30-13.45 - A dual source-filter model of snore audio for snorer group classification&lt;br&gt;&lt;small&gt;Achuth Rao MV; Shivani Yadav; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.45-14.00 - An 'End-to-Evolution' Hybrid Approach for Snore Sound Classification&lt;br&gt;&lt;small&gt;Michael Freitag; Shahin Amiriparian; Nicholas Cummins; Maurice Gerczuk; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.00-14.15 - Snore Sound Classification Using Image-based Deep Spectrum 
Features&lt;br&gt;&lt;small&gt;Shahin Amiriparian; Maurice Gerczuk; Sandra Ottl; Nicholas Cummins; Michael Freitag; Sergey Pugachevskiy; Alice Baird; Bj\u00f6rn Schuller&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.15-14.30 - Exploring Fusion Methods and Feature Space for the Classification of Paralinguistic Information&lt;br&gt;&lt;small&gt;David Tavarez; Xabier Sarasola; Agustin Alonso; Jon Sanchez; Luis Serrano; Eva Navas; Inma Hern\u00e1ez&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.45 - DNN-based Feature Extraction and Classifier Combination for Child-Directed Speech, Cold and Snoring Identification&lt;br&gt;&lt;small&gt;G\u00e1bor Gosztolya; R\u00f3bert Busa-Fekete; Tam\u00e1s Gr\u00f3sz; L\u00e1szl\u00f3 T\u00f3th&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.45-15.00 - Introducing Weighted Kernel Classifiers for Handling Imbalanced Paralinguistic Corpora: Snoring, Addressee and Cold&lt;br&gt;&lt;small&gt;Heysem Kaya; Alexey Karpov&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.00-15.15 - The INTERSPEECH 2017 Computational Paralinguistics Challenge: A Summary of Results&lt;br&gt;&lt;small&gt;Stefan Steidl  &lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.15-15.30 - Discussion&lt;br&gt;&lt;small&gt;Bj\u00f6rn Schuller; Anton Batliner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Paralinguistics in Speech and Language\"\n            data-category-ids=\"1052\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Special Session: Interspeech 2017 Computational Paralinguistics ChallengE (ComParE) 2<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            
data-id=\"4283\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Show &amp; Tell 7\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"11\"\n            data-room-id=\"1070\"\n            data-room-name=\"E306\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"13.30-15.30 - Remote articulation test system based on WebRTC&lt;br&gt;&lt;small&gt;Ikuyo Masuda-Katsuse&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Soundtracing for realtime speech adjustment to environmental conditions in 3D simulations&lt;br&gt;&lt;small&gt;Szymon Pa\u0142ka; Tomasz P\u0119dzim\u0105\u017c; Bartosz Ziolko&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - The ModelTalker Project: A web-based voice banking pipeline for ALS\/MND patients&lt;br&gt;&lt;small&gt;H Timothy Bunnell; Jason Lilley; Kathleen McGrath&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Visible Vowels: a Tool for the Visualization of Vowel Variation&lt;br&gt;&lt;small&gt;Wilbert Heeringa; Hans Van de Velde&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.30-15.30 - Vocal-tract Model with Static Articulators: Lips, Teeth, Tongue, and More&lt;br&gt;&lt;small&gt;Takayuki Arai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Show & Tell\"\n            data-category-ids=\"1063\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Show & Tell 7<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n                                <div 
style=\"background-color:#EEA2A2; position: absolute !important ; height: 4px !important; left: 4px; right: 4px; bottom : 2px !important;  border\"><\/div>\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4273\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Multimodal Resources and Annotation\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"3\"\n            data-room-id=\"1065\"\n            data-room-name=\"B4\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Stephanie Strassel; Febe De Wet&lt;br&gt;&lt;br&gt;13.30-13.50 - CALYOU: A Comparable Spoken Algerian Corpus Harvested from YouTube&lt;br&gt;&lt;small&gt;Karima Abidi; Mohamed amine Menacer; Kamel Smaili&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - PRAV: A Phonetically Rich Audio Visual Corpus&lt;br&gt;&lt;small&gt;Abhishek Avinash Narwekar; Prasanta Ghosh&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - NTCD-TIMIT: A New Database and Baseline for Noise-robust Audio-visual Speech Recognition&lt;br&gt;&lt;small&gt;Ahmed Hussen Abdelaziz&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - The Extended SPaRKy Restaurant Corpus: designing a corpus with variable information density&lt;br&gt;&lt;small&gt;David M. 
Howcroft; Dietrich Klakow; Vera Demberg&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Automatic Construction of the Finnish Parliament Speech Corpus&lt;br&gt;&lt;small&gt;Andr\u00e9 Mansikkaniemi; Peter Smit; Mikko Kurimo&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Building audio-visual phonetically annotated Arabic corpus for expressive text to speech&lt;br&gt;&lt;small&gt;Omnia Abdo; Sherif Abdou; Mervat Fashal&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Spoken Language Processing: Translation, Information Retrieval, Summarization, Resources and Evaluation\"\n            data-category-ids=\"1053\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Multimodal Resources and Annotation<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4272\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Robust Speaker Recognition\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"0\"\n            data-room-id=\"1064\"\n            data-room-name=\"A2\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;John Hansen; Tomi Kinnunen&lt;br&gt;&lt;br&gt;13.30-13.50 - CNN-based joint mapping of short and long utterance i-vectors for speaker verification using short utterances&lt;br&gt;&lt;small&gt;Jinxi Guo; Usha Nookala; Abeer 
Alwan&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Curriculum Learning based Probabilistic Linear Discriminant Analysis for Noise Robust Speaker Recognition&lt;br&gt;&lt;small&gt;Shivesh Ranjan; Abhinav Misra; John H.L. Hansen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - I-vector Transformation Using a Novel Discriminative Denoising Autoencoder for Noise-robust Speaker Recognition&lt;br&gt;&lt;small&gt;Shivangi Mahto; Hitoshi Yamamoto; Takafumi Koshinaka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Unsupervised Discriminative Training of PLDA for Domain Adaptation in Speaker Verification&lt;br&gt;&lt;small&gt;Qiongqiong Wang; Takafumi Koshinaka&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Speaker Verification Under Adverse Conditions Using I-vector Adaptation and Neural Networks&lt;br&gt;&lt;small&gt;Md Jahangir Alam; Patrick Kenny; Gautam Bhattacharya; Marcel Kockmann&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Improving Robustness of Speaker Recognition to New Conditions Using Unlabeled Data&lt;br&gt;&lt;small&gt;Diego Castan; Mitchell McLaren; Luciana Ferrer; Aaron Lawson; Alicia Lozano-Diez&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speaker and Language Identification\"\n            data-category-ids=\"1054\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Robust Speaker Recognition<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4271\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Speech and Audio Segmentation and Classification 1\"\n            
data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"14\"\n            data-room-id=\"1059\"\n            data-room-name=\"F11\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Mahadeva Prasanna; Tomoki Toda&lt;br&gt;&lt;br&gt;13.30-13.50 - Occupancy Detection in Commercial and Residential Environments Using Audio Signal&lt;br&gt;&lt;small&gt;Shabnam Ghaffarzadegan; Attila Reiss; Mirko Ruhs; Robert Duerichen; Zhe Feng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Data Augmentation, Missing Feature Mask and Kernel Classification for Through-The-Wall Acoustic Surveillance&lt;br&gt;&lt;small&gt;Tran-Huy Dat; Wen Zheng Terence Ng; Yi Ren Leng&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Endpoint detection using grid long short-term memory network for streaming speech recognition&lt;br&gt;&lt;small&gt;Shuo-Yiin Chang; Bo Li; Tara Sainath; Gabor Simko; Carolina Parada&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Deep Learning Techniques in Tandem with Signal Processing Cues for Phonetic Segmentation for Text to Speech Synthesis in Indian Languages&lt;br&gt;&lt;small&gt;Arun Baby; Jeena Prakash; Rupak Vignesh; Hema Murthy&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - Gate Activation Signal Analysis for Gated Recurrent Neural Networks and Its Correlation with Phoneme Boundaries&lt;br&gt;&lt;small&gt;Yu-Hsuan Wang; Cheng-Tao Chung; Hung-yi Lee&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Speaker Change Detection in Broadcast TV using Bidirectional Long Short-Term Memory Networks&lt;br&gt;&lt;small&gt;Ruiqing Yin; Herv\u00e9 Bredin; Claude Barras&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Analysis of Speech and Audio Signals\"\n            data-category-ids=\"1062\"\n            data-span-all=\"\"\n                    
  >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Speech and Audio Segmentation and Classification 1<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4274\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Forensic Phonetics and Sociophonetic Varieties\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"9\"\n            data-room-id=\"1062\"\n            data-room-name=\"D8\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Agustin Gravano; Melanie Weirich&lt;br&gt;&lt;br&gt;13.30-13.50 - What is the relevant population? Considerations for the computation of likelihood ratios in forensic voice comparison&lt;br&gt;&lt;small&gt;Vincent Hughes; Paul Foulkes&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Voice disguise vs. 
Impersonation: Acoustic and perceptual measurements of vocal flexibility in non experts&lt;br&gt;&lt;small&gt;Veronique Delvaux; Lise Caucheteux; Kathy Huet; Myriam Piccaluga; Bernard Harmegnies&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Schwa Realization in French: Using Automatic Speech Processing to Study Phonological and Socio-linguistic Factors in Large Corpora&lt;br&gt;&lt;small&gt;Yaru WU; Martine Adda-Decker; Cecile Fougeron; Lori Lamel&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - The Social Life of Tswana Ejectives&lt;br&gt;&lt;small&gt;Daniel Duran; Jagoda Bruni; Grzegorz Dogil; Justus Roux&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - How long is too long? How pause features after requests affect the perceived willingness of affirmative answers&lt;br&gt;&lt;small&gt;Lea S. Kohtz; Oliver Niebuhr&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Shadowing Synthesized Speech \u2013 Segmental Analysis of Phonetic Convergence&lt;br&gt;&lt;small&gt;Iona Gessinger; Eran Raveh; S\u00e9bastien Le Maguer; Bernd M\u00f6bius; Ingmar Steiner&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Phonetics, Phonology, and Prosody\"\n            data-category-ids=\"1056\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Forensic Phonetics and Sociophonetic Varieties<\/span>\n              <\/div>\n              <div class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"4270\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Neural Network Acoustic Models for ASR 3\"\n            data-abs-nbr=\"\"\n            data-ystart=\"13.5\"\n            
data-yend=\"15.5\"\n            data-time=\"13:30-15:30\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"&lt;b&gt;Chairs: &lt;\/b&gt;Bhuvana Ramabhadran; Rohit Prabhavalkar&lt;br&gt;&lt;br&gt;13.30-13.50 - Deep Neural Factorization for Speech Recognition&lt;br&gt;&lt;small&gt;Jen-Tzung Chien; Chen Shen&lt;\/small&gt;&lt;br&gt;&lt;br&gt;13.50-14.10 - Semi-supervised DNN training with word selection for ASR&lt;br&gt;&lt;small&gt;Karel Vesely; Lukas Burget; Jan \u010cernock\u00fd&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.10-14.30 - Gaussian Prediction based Attention for Online End-to-End Speech Recognition&lt;br&gt;&lt;small&gt;Junfeng Hou; ShiLiang Zhang; Lirong Dai&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.30-14.50 - Efficient knowledge distillation from an ensemble of teachers&lt;br&gt;&lt;small&gt;Takashi Fukuda; Masayuki Suzuki; Gakuto Kurata; Samuel Thomas; Jia Cui; Bhuvana Ramabhadran&lt;\/small&gt;&lt;br&gt;&lt;br&gt;14.50-15.10 - An Analysis of &quot;Attention&quot; in Sequence-to-Sequence Models&lt;br&gt;&lt;small&gt;Rohit Prabhavalkar; Tara Sainath; Bo Li; Kanishka Rao; Navdeep Jaitly&lt;\/small&gt;&lt;br&gt;&lt;br&gt;15.10-15.30 - Neural Speech Recognizer: Acoustic-to-Word LSTM Model for Large Vocabulary Speech Recognition&lt;br&gt;&lt;small&gt;Hagen Soltau; Hank Liao; Hasim Sak&lt;\/small&gt;&lt;br&gt;&lt;br&gt;\"\n            data-category=\"Speech Recognition: Signal Processing, Acoustic Modeling, Robustness, Adaptation\"\n            data-category-ids=\"1058\"\n            data-span-all=\"\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Neural Network Acoustic Models for ASR 3<\/span>\n              <\/div>\n              <div 
class=\"span_room\"><span><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  13:30-15:30                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5260\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Refreshments\"\n            data-abs-nbr=\"\"\n            data-ystart=\"15.5\"\n            data-yend=\"16\"\n            data-time=\"15:30-16:00\"\n            data-room=\"22\"\n            data-room-id=\"1079\"\n            data-room-name=\"Various locations\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Refreshments<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Various locations<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  15:30-16:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n          <div class=\"event \"\n            data-id=\"5263\"\n            data-project=\"project_242_2017_01_12\"\n            data-title=\"Closing session\"\n            data-abs-nbr=\"\"\n            data-ystart=\"16\"\n            data-yend=\"17\"\n            data-time=\"16:00-17:00\"\n            data-room=\"1\"\n            data-room-id=\"1063\"\n            data-room-name=\"Aula Magna\"\n            data-day=\"4\"\n            data-abs-path=\"\"\n            data-speaker=\"\"\n            data-speakercell=\"\"\n            data-info=\"\n                 &lt;p&gt;The session will 
also be broadcasted (with two-way communication) to rooms A2 and C6.&lt;br&gt;&lt;\/p&gt;\n                &lt;br&gt;&lt;br&gt;\"\n            data-category=\"Misc\"\n            data-category-ids=\"1068\"\n            data-span-all=\"1\"\n                      >\n\n            <div class=\"event_content\">\n              <div class=\"title\">\n                                <span>Closing session<\/span>\n              <\/div>\n              <div class=\"span_room\"><span>Aula Magna<br\/><\/span><\/div>\n              <div class=\"time\">\n                <span>\n                  16:00-17:00                <\/span><\/div>\n\n              \n            <\/div>\n\n\n            \n          <\/div>\n\n      \n      <div id=\"eventInfo\" class=\"modal fade bs-example-modal-lg\" tabindex=\"-1\" role=\"dialog\" style=\"font-size: medium; line-height: normal; text-transform: none;\">\n        <div class=\"modal-dialog\">\n          <div class=\"modal-content\">\n            <div class=\"modal-header\">\n              <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\">\n                <span aria-hidden=\"true\">&times;<\/span><\/button>\n              <h4 class=\"modal-title\"><\/h4>\n            <\/div>\n            <div class=\"modal-body\">\n              <p class=\"modal-speaker\"><\/p>\n              <p class=\"modal-time\"><\/p>\n              <p class=\"modal-info\"><\/p>\n              <p id=\"modal-abs\"><\/p>\n              <p id=\"modal-abs-oldie\">\n                <span style=\"display: none;\">Du anv\u00e4nder en f\u00f6r\u00e5ldrad webbl\u00e4sare.<\/span>\n                <a id=\"abs-url-oldie\" target=\"_blank\" style=\"display: none;\">Klicka h\u00e4r f\u00f6r att l\u00e4sa bidraget<\/a>\n                <iframe id=\"abs-iframe-oldie\" width=\"100%\" height=\"300px\"><\/iframe>\n              <\/p>\n              <i id=\"modal-abs-spinner\" class=\"fa fa-spinner fa-spin\" aria-hidden=\"true\" style=\"display: 
none;\"><\/i>\n            <\/div>\n            <div class=\"modal-footer\">\n              <span style=\"text-align: left; float: left;\" class=\"modal-category\"><\/span>\n              <button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Close<\/button>\n            <\/div>\n          <\/div><!-- \/.modal-content -->\n        <\/div><!-- \/.modal-dialog -->\n      <\/div><!-- \/.modal -->\n\n    <\/div>\n\n  <\/div>\n\n\n","protected":false},"excerpt":{"rendered":"","protected":false},"author":1,"featured_media":0,"parent":0,"menu_order":0,"comment_status":"closed","ping_status":"closed","template":"page-template-blank.php","meta":{"_et_pb_use_builder":"","_et_pb_old_content":"","_et_gb_content_width":""},"_links":{"self":[{"href":"https:\/\/program.appinconf.com\/program\/wp-json\/wp\/v2\/pages\/206"}],"collection":[{"href":"https:\/\/program.appinconf.com\/program\/wp-json\/wp\/v2\/pages"}],"about":[{"href":"https:\/\/program.appinconf.com\/program\/wp-json\/wp\/v2\/types\/page"}],"author":[{"embeddable":true,"href":"https:\/\/program.appinconf.com\/program\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/program.appinconf.com\/program\/wp-json\/wp\/v2\/comments?post=206"}],"version-history":[{"count":6,"href":"https:\/\/program.appinconf.com\/program\/wp-json\/wp\/v2\/pages\/206\/revisions"}],"predecessor-version":[{"id":239,"href":"https:\/\/program.appinconf.com\/program\/wp-json\/wp\/v2\/pages\/206\/revisions\/239"}],"wp:attachment":[{"href":"https:\/\/program.appinconf.com\/program\/wp-json\/wp\/v2\/media?parent=206"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}