{"id":8910,"date":"2022-12-08T14:58:46","date_gmt":"2022-12-08T06:58:46","guid":{"rendered":"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao-2\/"},"modified":"2023-07-25T11:36:32","modified_gmt":"2023-07-25T03:36:32","slug":"%e6%9b%b9%e6%98%b1-yu-tsao","status":"publish","type":"portfolio","link":"https:\/\/eeweb.cycu.edu.tw\/en\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/","title":{"rendered":"\u66f9\u6631 Yu Tsao"},"content":{"rendered":"\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-1vozkxo-a480e893c9f0c85e8839d82a46071de6\">\n.avia-section.av-1vozkxo-a480e893c9f0c85e8839d82a46071de6{\nbackground-repeat:no-repeat;\nbackground-image:url(https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/11\/member_bn3.jpg);\nbackground-position:50% 100%;\nbackground-attachment:fixed;\n}\n.avia-section.av-1vozkxo-a480e893c9f0c85e8839d82a46071de6 .av-section-color-overlay{\nopacity:0.1;\nbackground-color:#0a0a0a;\n}\n<\/style>\n<div id='av_section_1'  class='avia-section av-1vozkxo-a480e893c9f0c85e8839d82a46071de6 main_color avia-section-no-padding avia-no-border-styling  avia-builder-el-0  el_before_av_section  avia-builder-el-first  avia-full-stretch avia-bg-style-fixed av-section-color-overlay-active  container_wrap sidebar_right'  data-section-bg-repeat='stretch'><div class='av-section-color-overlay-wrap'><div class=\"av-section-color-overlay\"><\/div><div class='container av-section-cont-open' ><main  role=\"main\" itemprop=\"mainContentOfPage\"  class='template-page content  av-content-small alpha units'><div class='post-entry post-entry-type-page post-entry-8910'><div class='entry-content-wrapper clearfix'>\n\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-1tvurcs-f2e9e6dd7d866fd357b2009e034dc7cc\">\n#top .hr.hr-invisible.av-1tvurcs-f2e9e6dd7d866fd357b2009e034dc7cc{\nheight:130px;\n}\n<\/style>\n<div  class='hr av-1tvurcs-f2e9e6dd7d866fd357b2009e034dc7cc hr-invisible  
avia-builder-el-1  el_before_av_hr  avia-builder-el-first   av-medium-hide av-small-hide av-mini-hide'><span class='hr-inner '><span class=\"hr-inner-style\"><\/span><\/span><\/div>\n\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-6dipb0-99187fc870060eac60e7a086196eb8d4\">\n#top .hr.hr-invisible.av-6dipb0-99187fc870060eac60e7a086196eb8d4{\nheight:80px;\n}\n<\/style>\n<div  class='hr av-6dipb0-99187fc870060eac60e7a086196eb8d4 hr-invisible  avia-builder-el-2  el_after_av_hr  el_before_av_one_fourth   av-desktop-hide'><span class='hr-inner '><span class=\"hr-inner-style\"><\/span><\/span><\/div>\n<div class='flex_column_table av-1pkaep8-148049f6706246a6e403b25299a3a3ca sc-av_one_fourth av-equal-height-column-flextable'><div class='flex_column av-1pkaep8-148049f6706246a6e403b25299a3a3ca av_one_fourth  avia-builder-el-3  el_after_av_hr  el_before_av_one_half  first flex_column_table_cell av-equal-height-column av-align-top '   ><\/div><div class='av-flex-placeholder'><\/div><div class='flex_column av-5za88c-b3fb53108d3afcf68d8540185510cdff av_one_half  avia-builder-el-4  el_after_av_one_fourth  el_before_av_one_fourth  flex_column_table_cell av-equal-height-column av-align-top  av-animated-generic bottom-to-top av-zero-column-padding '   ><style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-1mo6zak-c44b7e92729d3940a4c9af7c4f95aeae\">\n#top .av-special-heading.av-1mo6zak-c44b7e92729d3940a4c9af7c4f95aeae{\nmargin:0px 0px 0px 0px;\npadding-bottom:10px;\ncolor:#ffffff;\n}\n.av-special-heading.av-1mo6zak-c44b7e92729d3940a4c9af7c4f95aeae .av-special-heading-tag{\npadding:0px 0px 0px 0px;\n}\n.av-special-heading.av-1mo6zak-c44b7e92729d3940a4c9af7c4f95aeae .special-heading-inner-border{\nborder-color:#ffffff;\n}\n.av-special-heading.av-1mo6zak-c44b7e92729d3940a4c9af7c4f95aeae .av-subheading{\nfont-size:15px;\ncolor:#ffffff;\n}\nbody .av-special-heading.av-1mo6zak-c44b7e92729d3940a4c9af7c4f95aeae 
.av-special-heading-tag .heading-wrap:before{\nborder-color:#ffffff;\n}\nbody .av-special-heading.av-1mo6zak-c44b7e92729d3940a4c9af7c4f95aeae .av-special-heading-tag .heading-wrap:after{\nborder-color:#ffffff;\n}\n<\/style>\n<div  class='av-special-heading av-1mo6zak-c44b7e92729d3940a4c9af7c4f95aeae av-special-heading-h1 custom-color-heading blockquote elegant-quote elegant-centered  avia-builder-el-5  avia-builder-el-no-sibling  '><div class='av_custom_color av-subheading av-subheading_above '><p>Faculty<\/p>\n<\/div><h1 class='av-special-heading-tag '  itemprop=\"headline\"  ><span class=\"heading-wrap\">Full-Time Faculty<\/span><\/h1><div class='special-heading-border'><div class='special-heading-inner-border'><\/div><\/div><\/div><\/div><div class='av-flex-placeholder'><\/div><div class='flex_column av-1kjfk9o-045acd399a77db3c02a3ed63156b666d av_one_fourth  avia-builder-el-6  el_after_av_one_half  el_before_av_hr  flex_column_table_cell av-equal-height-column av-align-top '   ><\/div><\/div><!--close column table wrapper. 
Autoclose: 1 -->\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-1k4gaos-b24e9ae481645b7b57e4fbb5ea1ab3af\">\n#top .hr.hr-invisible.av-1k4gaos-b24e9ae481645b7b57e4fbb5ea1ab3af{\nheight:90px;\n}\n<\/style>\n<div  class='hr av-1k4gaos-b24e9ae481645b7b57e4fbb5ea1ab3af hr-invisible  avia-builder-el-7  el_after_av_one_fourth  el_before_av_hr   av-medium-hide av-small-hide av-mini-hide'><span class='hr-inner '><span class=\"hr-inner-style\"><\/span><\/span><\/div>\n\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-1i9je0c-8cc193b442e815fbac246f862b240f29\">\n#top .hr.hr-invisible.av-1i9je0c-8cc193b442e815fbac246f862b240f29{\nheight:40px;\n}\n<\/style>\n<div  class='hr av-1i9je0c-8cc193b442e815fbac246f862b240f29 hr-invisible  avia-builder-el-8  el_after_av_hr  el_before_av_one_full   av-desktop-hide'><span class='hr-inner '><span class=\"hr-inner-style\"><\/span><\/span><\/div>\n<div class='flex_column av-1fo5c24-327ddfe951b1736a7edc4bb40d1f0ab3 av_one_full  avia-builder-el-9  el_after_av_hr  avia-builder-el-last  first flex_column_div '   ><section class=\"av_textblock_section  av-medium-hide av-small-hide av-mini-hide\"  itemscope=\"itemscope\" itemtype=\"https:\/\/schema.org\/CreativeWork\" ><div class='avia_textblock  av_inherit_color  av-medium-font-size-overwrite av-medium-font-size-30 av-small-font-size-overwrite av-small-font-size-25 av-mini-font-size-overwrite av-mini-font-size-18'  style='font-size:40px; color:#faeddc; '  itemprop=\"text\" ><p style=\"text-align: center; opacity: 0.3; font-size: 54px; margin-bottom: -15px;\"><strong>The Electrical Engineering Department<\/strong><\/p>\n<\/div><\/section><br \/>\n<section class=\"av_textblock_section  av-desktop-hide\"  itemscope=\"itemscope\" itemtype=\"https:\/\/schema.org\/CreativeWork\" ><div class='avia_textblock  av_inherit_color  av-medium-font-size-overwrite av-medium-font-size-30 av-small-font-size-overwrite av-small-font-size-25 
av-mini-font-size-overwrite av-mini-font-size-18'  style='font-size:40px; color:#ffffff; '  itemprop=\"text\" ><p style=\"opacity: 0.3; font-size: 30px; text-align: left; line-height: 1.2em;\"><strong>The<br \/>\nElectrical <\/strong><strong>Engineering<br \/>\n<\/strong><strong>Department<\/strong><\/p>\n<\/div><\/section><\/p><\/div>\n\n<\/div><\/div><\/main><!-- close content main element --><\/div><\/div><\/div><div id='av_section_2'  class='avia-section av-1be5s5o-4682d0e87b313525ffcbbb90285b44f4 alternate_color avia-section-default avia-no-border-styling  avia-builder-el-12  el_after_av_section  el_before_av_section  avia-bg-style-scroll  container_wrap sidebar_right'  ><div class='container av-section-cont-open' ><div class='template-page content  av-content-small alpha units'><div class='post-entry post-entry-type-page post-entry-8910'><div class='entry-content-wrapper clearfix'>\n<div class='flex_column_table av-19rnq98-52a76e537ea4324937ce259a47420d41 sc-av_one_fourth av-equal-height-column-flextable'>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-19rnq98-52a76e537ea4324937ce259a47420d41\">\n.flex_column.av-19rnq98-52a76e537ea4324937ce259a47420d41{\npadding:30px 30px 30px 30px;\nbackground-color:#e56612;\n}\n<\/style>\n<div class='flex_column av-19rnq98-52a76e537ea4324937ce259a47420d41 av_one_fourth  avia-builder-el-13  el_before_av_three_fourth  avia-builder-el-first  first no_margin flex_column_table_cell av-equal-height-column av-align-top '   ><style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-17vd3gc-94664d58878f17e096bdd4a2641de09e\">\n.avia-image-container.av-17vd3gc-94664d58878f17e096bdd4a2641de09e .av-image-caption-overlay-center{\ncolor:#ffffff;\n}\n<\/style>\n<div  class='avia-image-container av-17vd3gc-94664d58878f17e096bdd4a2641de09e av-styling-no-styling avia-align-center  avia-builder-el-14  avia-builder-el-no-sibling  '  itemprop=\"image\" itemscope=\"itemscope\" 
itemtype=\"https:\/\/schema.org\/ImageObject\" ><div class=\"avia-image-container-inner\"><div class=\"avia-image-overlay-wrap\"><img decoding=\"async\" class='wp-image- avia-img-lazy-loading-not- avia_image' src=\"https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/12\/\u66f9\u6631\u8001\u5e2b.png\" alt='' title=''   itemprop=\"thumbnailUrl\"  \/><\/div><\/div><\/div><\/div>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-168ejf0-ade49c752a170df8c76fd18b341d6466\">\n.flex_column.av-168ejf0-ade49c752a170df8c76fd18b341d6466{\npadding:30px 30px 30px 30px;\nbackground-color:#ffffff;\n}\n<\/style>\n<div class='flex_column av-168ejf0-ade49c752a170df8c76fd18b341d6466 av_three_fourth  avia-builder-el-15  el_after_av_one_fourth  avia-builder-el-last  no_margin flex_column_table_cell av-equal-height-column av-align-top '   ><p>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-lbkg1cg9-3363bb4ed85e2e6569be365bd4c57dc9\">\n#top .av-special-heading.av-lbkg1cg9-3363bb4ed85e2e6569be365bd4c57dc9{\nmargin:0px 0px 0px 0px;\npadding-bottom:0px;\n}\n.av-special-heading.av-lbkg1cg9-3363bb4ed85e2e6569be365bd4c57dc9 .av-special-heading-tag{\npadding:0px 0px 0px 0px;\n}\n<\/style>\n<div  class='av-special-heading av-lbkg1cg9-3363bb4ed85e2e6569be365bd4c57dc9 av-special-heading-h2 blockquote modern-quote  avia-builder-el-16  el_before_av_textblock  avia-builder-el-first  linkline strong av-thin-font '><h2 class='av-special-heading-tag '  itemprop=\"headline\"  ><strong>\u66f9\u6631<\/strong> <span style=\"font-size: 18px;\">Yu Tsao<\/span><\/h2><div class='special-heading-border'><div class='special-heading-inner-border'><\/div><\/div><\/div><br \/>\n<section class=\"av_textblock_section \"  itemscope=\"itemscope\" itemtype=\"https:\/\/schema.org\/CreativeWork\" ><div class='avia_textblock  '   itemprop=\"text\" ><p><span class=\"redDark15Br\">Joint Appointment Professor (Academia Sinica) (Communications &amp; Control 
Systems Group &amp; Machine Learning Group)<\/span><\/p>\n<\/div><\/section><br \/>\n\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-158no98-c1eecabe219577b64fba3055dbbe8a20\">\n#top .hr.av-158no98-c1eecabe219577b64fba3055dbbe8a20{\nmargin-top:0px;\nmargin-bottom:10px;\n}\n.hr.av-158no98-c1eecabe219577b64fba3055dbbe8a20 .hr-inner{\nwidth:100%;\nborder-color:#d6d6d6;\n}\n<\/style>\n<div  class='hr av-158no98-c1eecabe219577b64fba3055dbbe8a20 hr-custom  avia-builder-el-18  el_after_av_textblock  el_before_av_textblock  hr-center hr-icon-no '><span class='hr-inner inner-border-av-border-thin'><span class=\"hr-inner-style\"><\/span><\/span><\/div><br \/>\n<section class=\"av_textblock_section \"  itemscope=\"itemscope\" itemtype=\"https:\/\/schema.org\/CreativeWork\" ><div class='avia_textblock  '   itemprop=\"text\" ><p>Email\uff1a <a href=\"mailto:yu.tsao@citi.sinica.edu.tw\">yu.tsao@citi.sinica.edu.tw<\/a><br \/>\nSpecialty\uff1a Acoustics Assistive Technology\u3001Speech Signal Processing\u3001Biomedical Signal Processing\u3001Deeping Learning\u3001Acoustic Model<br \/>\nTel\uff1a 886-2-2787-2300 #2787<br \/>\nLaboratory\uff1a Research Center for Information Technology Innovation<\/p>\n<\/div><\/section><br \/>\n\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-3qbpe4-585c5ca08ae8b21690d7b86f2b29f90c\">\n#top .hr.hr-invisible.av-3qbpe4-585c5ca08ae8b21690d7b86f2b29f90c{\nheight:10px;\n}\n<\/style>\n<div  class='hr av-3qbpe4-585c5ca08ae8b21690d7b86f2b29f90c hr-invisible  avia-builder-el-20  el_after_av_textblock  el_before_av_buttonrow  '><span class='hr-inner '><span class=\"hr-inner-style\"><\/span><\/span><\/div><br \/>\n<div  class='avia-buttonrow-wrap av-ywg0d8-7954bcbe320443bf8f83a91ac02488c5 avia-buttonrow-left  avia-builder-el-21  el_after_av_hr  avia-builder-el-last  '>\n\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" 
id=\"style-css-av-386ml8-8e3109d17d0c68c527ecb23c8f58ef6c\">\n#top #wrap_all .avia-button.av-386ml8-8e3109d17d0c68c527ecb23c8f58ef6c{\nmargin-bottom:5px;\nmargin-right:5px;\n}\n<\/style>\n<a href='https:\/\/www.citi.sinica.edu.tw\/pages\/yu.tsao\/contact_zh.html' class='avia-button av-386ml8-8e3109d17d0c68c527ecb23c8f58ef6c avia-icon_select-yes-left-icon avia-size-medium av-icon-on-hover avia-color-theme-color-highlight' target=\"_blank\" rel=\"noopener noreferrer\"><span class='avia_button_icon avia_button_icon_left ' aria-hidden='true' data-av_icon='\ue88d' data-av_iconfont='entypo-fontello'><\/span><span class='avia_iconbox_title' >Website<\/span><\/a>\n<\/div><\/p><\/div><\/div><!--close column table wrapper. Autoclose: 1 -->\n\n<\/div><\/div><\/div><!-- close content main div --><\/div><\/div><div id='av_section_3'  class='avia-section av-u9nmyk-371a6d973ede05fb69561adf76fddf0e main_color avia-section-default avia-no-border-styling  avia-builder-el-22  el_after_av_section  avia-builder-el-last  avia-bg-style-scroll  container_wrap sidebar_right'  ><div class='container av-section-cont-open' ><div class='template-page content  av-content-small alpha units'><div class='post-entry post-entry-type-page post-entry-8910'><div class='entry-content-wrapper clearfix'>\n<div class='flex_column av-tnto7g-50a43de890a75522aeacb02d505b7d9f av_one_full  avia-builder-el-23  el_before_av_one_full  avia-builder-el-first  first flex_column_div '   ><style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-qu0jrg-d8f427a990452a80a7a703e38f9ffa53\">\n#top .av-special-heading.av-qu0jrg-d8f427a990452a80a7a703e38f9ffa53{\nmargin:0px 0px 0px 0px;\npadding-bottom:0px;\n}\n.av-special-heading.av-qu0jrg-d8f427a990452a80a7a703e38f9ffa53 .av-special-heading-tag{\npadding:0px 0px 0px 0px;\n}\n<\/style>\n<div  class='av-special-heading av-qu0jrg-d8f427a990452a80a7a703e38f9ffa53 av-special-heading-h2 blockquote modern-quote  avia-builder-el-24  
avia-builder-el-no-sibling  linkline strong av-thin-font '><h2 class='av-special-heading-tag '  itemprop=\"headline\"  ><strong>Details<\/strong><\/h2><div class='special-heading-border'><div class='special-heading-inner-border'><\/div><\/div><\/div><\/div>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-936ak-e310e5b6e1b2ea804204976590cef35e\">\n.flex_column.av-936ak-e310e5b6e1b2ea804204976590cef35e{\nborder-width:1px;\nborder-style:solid;\npadding:30px 30px 15px 30px;\nbackground-color:#ffffff;\n}\n<\/style>\n<div class='flex_column av-936ak-e310e5b6e1b2ea804204976590cef35e av_one_full  avia-builder-el-25  el_after_av_one_full  el_before_av_one_full  box_bl first flex_column_div column-top-margin'   ><p>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-nym74s-574ae00754cebdb013d8947f655f24fc\">\n#top .av-special-heading.av-nym74s-574ae00754cebdb013d8947f655f24fc{\npadding-bottom:10px;\n}\n<\/style>\n<div  class='av-special-heading av-nym74s-574ae00754cebdb013d8947f655f24fc av-special-heading-h3 blockquote modern-quote  avia-builder-el-26  el_before_av_textblock  avia-builder-el-first  '><h3 class='av-special-heading-tag '  itemprop=\"headline\"  >Education :<\/h3><div class='special-heading-border'><div class='special-heading-inner-border'><\/div><\/div><\/div><br \/>\n<section class=\"av_textblock_section \"  itemscope=\"itemscope\" itemtype=\"https:\/\/schema.org\/CreativeWork\" ><div class='avia_textblock  '   itemprop=\"text\" ><ul>\n<li>\u535a\u58eb, \u96fb\u6a5f\u96fb\u8166\u5de5\u7a0b\u5b78\u7cfb, \u55ac\u6cbb\u4e9e\u7406\u5de5\u5b78\u9662, \u7f8e\u570b (2003\/8\u20132008\/12)<br \/>\nPhD Electrical and Computer Engineering, Georgia Institute of Technology, USA (2003\/8\u20132008\/12)<\/li>\n<li>\u78a9\u58eb, \u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb, \u570b\u7acb\u53f0\u7063\u5927\u5b78, \u4e2d\u83ef\u6c11\u570b (1999\/8\u20132001\/6)<br \/>\nMSc Electrical Engineering, National Taiwan 
University (1999\/8\u20132001\/6)<\/li>\n<li>\u5927\u5b78, \u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb, \u570b\u7acb\u53f0\u7063\u5927\u5b78, \u4e2d\u83ef\u6c11\u570b (1995\/8\u20131999\/6)<br \/>\nBSc Electrical Engineering, National Taiwan University (1995\/8\u20131999\/6)<\/li>\n<\/ul>\n<\/div><\/section><\/p><\/div>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-k5jkj0-e9e79e8c01bc7607e24a3ab33243e142\">\n.flex_column.av-k5jkj0-e9e79e8c01bc7607e24a3ab33243e142{\nborder-width:1px;\nborder-style:solid;\npadding:30px 30px 15px 30px;\nbackground-color:#ffffff;\n}\n<\/style>\n<div class='flex_column av-k5jkj0-e9e79e8c01bc7607e24a3ab33243e142 av_one_full  avia-builder-el-28  el_after_av_one_full  el_before_av_one_full  box_bl first flex_column_div column-top-margin'   ><p>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-jh3xv0-6448ad2529dcb50546427c8b19033633\">\n#top .av-special-heading.av-jh3xv0-6448ad2529dcb50546427c8b19033633{\npadding-bottom:10px;\n}\n<\/style>\n<div  class='av-special-heading av-jh3xv0-6448ad2529dcb50546427c8b19033633 av-special-heading-h3 blockquote modern-quote  avia-builder-el-29  el_before_av_textblock  avia-builder-el-first  '><h3 class='av-special-heading-tag '  itemprop=\"headline\"  >Experience\uff1a<\/h3><div class='special-heading-border'><div class='special-heading-inner-border'><\/div><\/div><\/div><br \/>\n<section class=\"av_textblock_section \"  itemscope=\"itemscope\" itemtype=\"https:\/\/schema.org\/CreativeWork\" ><div class='avia_textblock  '   itemprop=\"text\" ><p>Current Position :<\/p>\n<ul>\n<li>Research Fellow (Professor) and Deputy Director with the Research Center for Information Technology Innovation, Republic of China<\/li>\n<\/ul>\n<p>Work Experience\u00a0:<\/p>\n<ul>\n<li>Chair , APSIPA, Speech, Language, and Audio (SLA) Technical Committee, (2021\/1\u20132021\/12)<\/li>\n<li>Research Fellow (Professor) and Deputy Director with the Research Center for 
Information Technology Innovation, Republic of China (2020\/9\u2013present)<\/li>\n<li>Deputy Director, Academia Sinica, Research Center for Information Technology Innovation, Republic of China (2020\/6\u2013present)<\/li>\n<li>Executive Director, Center for AI Innovation and Application, Academia Sinica, Republic of China (2020\/1\u20132020\/8)<\/li>\n<li>Associate Research Fellow (Associate Professor), Academia Sinica, Research Center for Information Technology Innovation, Republic of China (2016\/5\u20132020\/8)<\/li>\n<li>Assistant Research Fellow (Assistant Professor)Research Center for Information Technology Innovation, Republic of China (2011\/11\u20132016\/4)<\/li>\n<li>Expert Researcher, National Institute of Information and Communications Technology, Spoken Language Communication Group, \u65e5\u672c (2009\/4\u20132011\/9)<\/li>\n<li>Postdoc Researcher, Georgia Institute of Technology, Electrical and Computer Engineering, United States (2009\/1\u20132009\/3)<\/li>\n<\/ul>\n<\/div><\/section><\/p><\/div>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-gi14po-b1febea8664a992b86320676e355966f\">\n.flex_column.av-gi14po-b1febea8664a992b86320676e355966f{\nborder-width:1px;\nborder-style:solid;\npadding:30px 30px 15px 30px;\nbackground-color:#ffffff;\n}\n<\/style>\n<div class='flex_column av-gi14po-b1febea8664a992b86320676e355966f av_one_full  avia-builder-el-31  el_after_av_one_full  el_before_av_one_full  box_or first flex_column_div column-top-margin'   ><p>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-do75ek-c5e2ac85d636c4a519ee9fa2310114f0\">\n#top .av-special-heading.av-do75ek-c5e2ac85d636c4a519ee9fa2310114f0{\npadding-bottom:10px;\n}\n<\/style>\n<div  class='av-special-heading av-do75ek-c5e2ac85d636c4a519ee9fa2310114f0 av-special-heading-h3 blockquote modern-quote  avia-builder-el-32  el_before_av_textblock  avia-builder-el-first  '><h3 class='av-special-heading-tag '  
itemprop=\"headline\"  >Field of Investigation\uff1a<\/h3><div class='special-heading-border'><div class='special-heading-inner-border'><\/div><\/div><\/div><br \/>\n<section class=\"av_textblock_section \"  itemscope=\"itemscope\" itemtype=\"https:\/\/schema.org\/CreativeWork\" ><div class='avia_textblock  '   itemprop=\"text\" ><ul>\n<li>\u8072\u5b78\u8f14\u52a9\u79d1\u6280(Acoustics Assistive Technology)<\/li>\n<li>\u8a9e\u97f3\u8a0a\u865f\u8655\u7406(Speech Signal Processing)<\/li>\n<li>\u751f\u7406\u8a0a\u865f\u8655\u7406(Biomedical Signal Processing)<\/li>\n<li>\u6df1\u5ea6\u5b78\u7fd2(Deeping Learning)<\/li>\n<li>\u8072\u5b78\u6a21\u578b(Acoustic Model\uff09<\/li>\n<\/ul>\n<\/div><\/section><\/p><\/div>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-5wh9z0-116fdf5e2c203234083bf107b2faaa78\">\n.flex_column.av-5wh9z0-116fdf5e2c203234083bf107b2faaa78{\nborder-width:1px;\nborder-style:solid;\npadding:30px 30px 15px 30px;\nbackground-color:#ffffff;\n}\n<\/style>\n<div class='flex_column av-5wh9z0-116fdf5e2c203234083bf107b2faaa78 av_one_full  avia-builder-el-34  el_after_av_one_full  el_before_av_one_full  box_or first flex_column_div column-top-margin'   ><p>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-fy36k-e7557104cad5c329693f552da800ad26\">\n#top .av-special-heading.av-fy36k-e7557104cad5c329693f552da800ad26{\npadding-bottom:10px;\n}\n<\/style>\n<div  class='av-special-heading av-fy36k-e7557104cad5c329693f552da800ad26 av-special-heading-h3 blockquote modern-quote  avia-builder-el-35  el_before_av_textblock  avia-builder-el-first  '><h3 class='av-special-heading-tag '  itemprop=\"headline\"  >Honors <span class='special_amp'>&amp;<\/span> Awards<\/h3><div class='special-heading-border'><div class='special-heading-inner-border'><\/div><\/div><\/div><br \/>\n<section class=\"av_textblock_section \"  itemscope=\"itemscope\" itemtype=\"https:\/\/schema.org\/CreativeWork\" ><div 
class='avia_textblock  '   itemprop=\"text\" ><p>1.PhD Thesis Award, ACLCLP (2020)<br \/>\n2.National Innovation Award, Taiwan (2020)<br \/>\n3.National Innovation Award, Taiwan (2019)<br \/>\n4.Mater Thesis Award, ACLCLP (2019)<br \/>\n5.Outstanding Elite Award, Chung Hwa Rotary Educational Foundation 2019-2020 (2019\u2013present)<br \/>\n6.Gold Medal, Merry Electroacoustic Award, Taiwan (2019)<br \/>\n7.Distinguished Lecture , APSIPA (2019\u20132021)<br \/>\n8.PhD Thesis Award, ACLCLP (2018)<br \/>\n9.Best Student Paper Award, ISCSLP 2018 (2018)<br \/>\n10.National Innovation Award, Taiwan (2018)<br \/>\n11.Forth Place, National Science Foundation (NSF\/CISE) Hearables Challenge (2017)<br \/>\n12.Career Development Award, Academia Sinica, Taiwan (2017\u2013present)<br \/>\n13.Best Paper Award, ROCLING 2017 (2017)<br \/>\n14.Best Poster Presentation Award, IEEE MIT URTC (2017)<br \/>\n15.Poster Presentation Award, APSIPA 2017 (2017)<br \/>\n16.Featured Article, IEEE Access (2017)<br \/>\n17.Third Place, BigMM2016 Challenge (2016)<br \/>\n18.Third Place, OC16 MixASR-CHEN Challenge, O-COCOSDA-2016 (2016)<br \/>\n19.Excellent Paper Award, TAAI 2012 (2012)<br \/>\n20.Travel Grant, Interspeech 2012 (2012)<\/p>\n<\/div><\/section><\/p><\/div>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-1xuoak-0ec82ffff0f1a29b843af87001d71d01\">\n.flex_column.av-1xuoak-0ec82ffff0f1a29b843af87001d71d01{\nborder-width:1px;\nborder-style:solid;\npadding:30px 30px 15px 30px;\nbackground-color:#ffffff;\n}\n<\/style>\n<div class='flex_column av-1xuoak-0ec82ffff0f1a29b843af87001d71d01 av_one_full  avia-builder-el-37  el_after_av_one_full  avia-builder-el-last  box_or first flex_column_div column-top-margin'   ><p>\n<style type=\"text\/css\" data-created_by=\"avia_inline_auto\" id=\"style-css-av-lbkgep6r-cdb0c35b4061db35ba14ccca9a730b96\">\n#top .av-special-heading.av-lbkgep6r-cdb0c35b4061db35ba14ccca9a730b96{\npadding-bottom:10px;\n}\n<\/style>\n<div  
class='av-special-heading av-lbkgep6r-cdb0c35b4061db35ba14ccca9a730b96 av-special-heading-h3 blockquote modern-quote  avia-builder-el-38  el_before_av_textblock  avia-builder-el-first  '><h3 class='av-special-heading-tag '  itemprop=\"headline\"  >Publications<\/h3><div class='special-heading-border'><div class='special-heading-inner-border'><\/div><\/div><\/div><br \/>\n<section class=\"av_textblock_section \"  itemscope=\"itemscope\" itemtype=\"https:\/\/schema.org\/CreativeWork\" ><div class='avia_textblock  '   itemprop=\"text\" ><p>Journal Articles<\/p>\n<p>1. R.-Y. Tseng, T.-W. Wang, S.-W. Fu, C.-Y. Lee, and Y. Tsao, &#8220;A Study of Joint Effect on Denoising Techniques and Visual Cues to Improve Speech Intelligibility in Cochlear Implant Simulation,&#8221; to appear in IEEE Transactions on Cognitive and Developmental Systems.<br \/>\n2. F. S. Abousaleh, W.-H. Cheng, N.-H. Yu, and Y. Tsao, &#8220;Multimodal Deep Learning Framework for Image Popularity Prediction on Social Media,&#8221; to appear in IEEE Transactions on Cognitive and Developmental Systems.<br \/>\n3. W. Ariyanti, T. Hussain, J.-C. Wang, C.-T. Wang, S.-H. Fang, and Y. Tsao, &#8220;Ensemble and Multimodal Learning for Pathological Voice Classification,&#8221; IEEE Sensors Journal, volume 5, number 7, pages 1-4, July 2021.<br \/>\n4. K.-C. Liu, M. Chan, C.-Y. Hsieh, H.-Y. Huang, C.-T. Chan, Y. Tsao, &#8220;Domain-adaptive Fall Detection Using Deep Adversarial Training,&#8221; IEEE Transactions on Neural Systems &amp; Rehabilitation Engineering, volume 29, pages 1243-1251, June 2021.<br \/>\n5. T.-H. Lin ,T. Akamatsu,Y. Tsao, &#8220;Sensing ecosystem dynamics via audio source separation: A case study of marine soundscapes off northeastern Taiwan,,&#8221; PLOS Computational Biology, volume 1, number 1, pages 1-23, February 2021.<br \/>\n6. T. Hussain, S. M. Siniscalchi, H.-L. S. Wang, Y. Tsao, S. V. Mario, and W.-H. 
Liao, &#8220;Ensemble Hierarchical Extreme Learning Machine for Speech Dereverberation,&#8221; IEEE Transactions on Cognitive and Developmental Systems, volume 12, number 4, pages 744-758, December 2020.<br \/>\n7. N. Y.-H. Wang, H.-L. S. Wang, T.-W. Wang, S.-W. Fu, X. Lu, H.-M. Wang, and Y. Tsao, &#8220;Improving the Intelligibility of Speech for Simulated Electric and Acoustic Stimulation Using Fully Convolutional Neural Networks,&#8221; IEEE Transactions on Neural Systems &amp; Rehabilitation Engineering, volume 29, pages 184-195, December 2020.<br \/>\n8. J.-K. Wang, Y.-F. Chang, K.-H. Tsai, W.-C. Wang, C.-Y. Tsai, C.-H. Cheng, and Y. Tsao, &#8220;Automatic recognition of murmurs of ventricular septal defect using convolutional recurrent neural networks with temporal attentive pooling,&#8221; Scientific Reports, volume 10, number 21797, pages 1-10, December 2020.<br \/>\n9. K.-H. Tsai, W.-C. Wang, C.-H. Cheng, C.-Y. Tsai, J.-K. Wang, T.-H. Lin, S.-H. Fang, L.-C. Chen, and Y. Tsao, &#8220;Blind Monaural Source Separation on Heart and Lung Sounds Based on Periodic-Coded Deep Autoencoder,&#8221; IEEE Journal of Biomedical and Health Informatics, volume 24, number 11, pages 3203-3214, November 2020.<br \/>\n10. X. Wang et al.,, &#8220;ASVspoof 2019: A Large-scale Public Database of Synthetized, Converted and Replayed Speech,&#8221; Computer Speech and Language, volume 64, pages 1-27, November 2020.<br \/>\n11. T.-A. Hsieh, H.-M. Wang, X. Lu, and Y. Tsao, &#8220;WaveCRN: An Efficient Convolutional Recurrent Neural Network for End-to-end Speech Enhancement,&#8221; IEEE Signal Processing Letters, volume 27, pages 2149-2153, November 2020.<br \/>\n12. H.-S. Lee, Y. Tsao, S.-K. Jeng, and H.-M. Wang, &#8220;Subspace-based Representation and Learning for Phonotactic Spoken Language Recognition,&#8221; IEEE Transactions on Audio, Speech and Language Processing, volume 28, pages 3065-3079, November 2020.<br \/>\n13. C. Yu, R. E. Zezario, S.-S. Wang, J. Sherman, Y.-Y. 
Hsieh, X. Lu, H.-M. Wang, and Y. Tsao, &#8220;Speech Enhancement based on Denoising Autoencoder with Multi-branched Encoders,&#8221; IEEE Transactions on Audio, Speech and Language Processing, volume 28, pages 2756-2769, October 2020.<br \/>\n14. W.-C. Huang, H. Luo, H.-T. Hwang, C.-C. Lo, Y.-H. Peng, Y. Tsao, and H.-M. Wang, &#8220;Unsupervised Representation Disentanglement using Cross Domain Features and Adversarial Learning in Variational Autoencoder based Voice Conversion,&#8221; IEEE Transactions on Emerging Topics in Computational Intelligence, volume 4, number 4, pages 468-479, August 2020.<br \/>\n15. N. Y.-H. Wang, C.-H. Chiang, H.-L. S. Wang and Y. Tsao, &#8220;Atypical Frequency Sweep Processing in Chinese Children With Reading Difficulties: Evidence From Magnetoencephalography,&#8221; Frontiers in Psychology, volume 99, pages 99, July 2020.<br \/>\n16. C. Yu, K.-H. Hung, S.-S. Wang, Y. Tsao, and J.-w. Hung, &#8220;Time-Domain Multi-modal Bone\/air Conducted Speech Enhancement,&#8221; IEEE Signal Processing Letters, volume 27, pages 1035-1039, June 2020.<br \/>\n17. M. Lee, L. Lin, C.-Y. Chen, Y. Tsao, T.-H. Yao, M.-H. Fei and S.-H. Fang, &#8220;Forecasting Air Quality in Taiwan by Using Machine Learning,&#8221; Scientific Reports, number 4153, pages 1-13, March 2020.<br \/>\n18. S. C. Hidayati, T. W. Goh, Ji.-S. G. Chan, C.-C. Hsu, J. See, L.-K. Wong, K.-L. Hua, Y. Tsao, and W.-H. Cheng, &#8220;Dress With Style: Learning Style from Joint Deep Embedding of Clothing Styles and Body Shapes,&#8221; IEEE Transactions on Multimedia, volume 23, pages 365-377, March 2020.<br \/>\n19. Y.-H. Lai, W.-N. Chen, T.-C. Hsu, C. Lin, Y. Tsao and S. Wu, &#8220;Overall Survival Prediction of Non-small Cell Lung Cancer by Integrating Microarray and Clinical Data with Deep Learning,&#8221; Scientific Reports, number 4679, pages 1-11, March 2020.<br \/>\n20. C.-L. Liu , S.-W. Fu, Y.-J. Li, J.-W. Huang, H.-M. Wang, and Y. 
Tsao, &#8220;Multichannel Speech Enhancement by Raw Waveform-mapping using Fully Convolutional Networks,&#8221; IEEE Transactions on Audio, Speech and Language Processing, volume 28, pages 1888-1900, February 2020.<br \/>\n21. S.-W. Fu, C.-F. Liao, Y. Tsao, &#8220;Learning with Learned Loss Function: Speech Enhancement with Quality-Net to Improve Perceptual Evaluation of Speech Quality,&#8221; IEEE Signal Processing Letters, volume 27, pages 26-30, December 2019.<br \/>\n22. T.-H. Lin amd Y. Tsao, &#8220;Source Separation in Ecoacoustics: A Roadmap towards Versatile Soundscape Information Retrieval,&#8221; Remote Sensing in Ecology and Conservation, volume online, pages 1-12, December 2019.<br \/>\n23. J.-Y. Wu , C. Yu , S.-W. Fu, C.-T. Liu , S.-Y. Chien , Y. Tsao, &#8220;Increasing Compactness of Deep Learning based Speech Enhancement Models with Parameter Pruning and Quantization Techniques,&#8221; IEEE Signal Processing Letters, volume 26, number 12, pages 1887-1891, December 2019.<br \/>\n24. C.-T. Wang, F.-C. Lin, J.-Y. Chen, M.-J. Hsiao, S.-H. Fang, Y.-H. Lai, Y. Tsao, &#8220;Detection of Pathological Voice Using Cepstrum Vectors: A Deep Learning Approach,&#8221; Journal of Voice, volume 33, number 5, pages pp. 634-641, September 2019.<br \/>\n25. S.-H. Fang, C.-T. Wang, J.-Y. Chen, Y. Tsao and F.-C. Lin, &#8220;Combining Acoustic Signals and Medical Records to Improve Pathological Voice Classification,&#8221; APSIPA Transactions on Signal and Information Processing, volume 8, pages 1-11, June 2019.<br \/>\n26. C.-W. Lee et al.,, &#8220;Bioimaging: New Templated Ostwald Ripening Process of Mesostructured FeOOH for Third\u2010Harmonic Generation Bioimaging,&#8221; Small, volume 15, number 20, pages 1-11, May 2019.<br \/>\n27. H.-T. Chiang, Y.-Y. Hsieh, S.-W. Fu, K.-H. Hung, Y. Tsao, S.-Y. Chien, &#8220;Noise Reduction in ECG Signals Using Fully Convolutional Denoising Autoencoders,&#8221; IEEE Access, volume 7, pages 60806-60813, April 2019.<br \/>\n28. Y.-C. 
Chu, Y.-F. Cheng, Y.-H. Lai, Y. Tsao, T.-Y. Tu, S. T. Young, T.-S. Chen, Y.-F. Chung, F. Lai, W.-H. Liao, &#8220;A Mobile Phone\u2013Based Approach for Hearing Screening of School-Age Children: Cross-Sectional Validation Study,&#8221; JMIR Mhealth Uhealth, volume 1, pages 1-13, April 2019.<br \/>\n29. Y. Tsao, T.-H. Lin, F. Chen, Y.-F. Chang, C.-H. Cheng, and K.-H. Tsai, &#8220;Robust S1 and S2 heart sound recognition based on spectral restoration and multi-style training,&#8221; Biomedical Signal Processing and Control, volume 49, pages 173-180, March 2019.<br \/>\n30. H.-L. S. Wanga , N. Y.-H. Wang , I-C. Chen, and Y. Tsao, &#8220;Auditory Identification of Frequency-Modulated Sweeps and Reading Difficulties in Chinese,&#8221; Research in Developmental Disabilities, volume 86, pages 53-61, January 2019.<br \/>\n31. C.-T. Liu, T.-W. Lin, Y.-H. Wu, Y.-S. Lin, H. Lee, Y. Tsao, and S.-Y. Chien, &#8220;Computation-Performance Optimization of Convolutional Neural Networks with Redundant Filter Removal,&#8221; IEEE Transactions on Circuits and Systems I, volume 66, pages 1908-1921, December 2018.<br \/>\n32. H.-P. Liu, Y. Tsao, and C.-S. Fuh, &#8220;Bone Conducted Speech Enhancement Using Deep Denoising Autoencoder,&#8221; Speech Communication, volume 104, pages 106-112, November 2018.<br \/>\n33. H.-T. Hwang, Y.-C. Wu, S.-S. Wang, C.-C. Hsu, Y. Tsao, H.-M. Wang, Y.-R. Wang, and S.-H. Chen, &#8220;Locally linear Embedding Based Post-filtering for Speech Enhancement,&#8221; Journal of Information Science and Engineering, volume 34, number 6, pages 1469-1491, October 2018.<br \/>\n34. S.-W. Fu, T.-W. Wang, Y. Tsao, X. Lu, and H. Kawai, &#8220;End-to-End Waveform Utterance Enhancement for Direct Evaluation Metrics Optimization by Fully Convolutional Neural Networks,&#8221; IEEE Transactions on Audio, Speech and Language Processing, volume 26, number 9, pages 1570-1584, September 2018.<br \/>\n35. S.-Y. Tsui, Y. Tsao, C.-W. Lin, S.-H. Fang, and C.-T. 
Wang, &#8220;Demographic and Symptomatic Features of Voice Disorders and Their Potential Application in Classification using Machine Learning Algorithms,&#8221; Folia Phoniatrica et Logopaedica, volume 70, pages 174-182, September 2018.<br \/>\n36. Y.-H. Lai, Y. Tsao, X. Lu, F. Chen, Y.-T. Su, K.-C. Chen, Y.-H. Chen, L.-C. Chen, P.-H. Li, and C.-H. Lee, &#8220;Deep Learning based Noise Reduction Approach to Improve Speech Intelligibility for Cochlear Implant Recipients,&#8221; Ear and Hearing, volume 39(4), number 4, pages 795-809, July 2018, This work receives the National Innovation Award 2018 (2018\u5e74\u570b\u5bb6\u65b0\u5275\u734e)<br \/>\n37. J.-C. Hou, S.-S. Wang, Y.-H. Lai, Y. Tsao, H.-W. Chang, and H.-M. Wang, &#8220;Audio-visual Speech Enhancement using Multimodal Deep Convolutional Neural Networks,&#8221; IEEE Transactions on Emerging Topics in Computational Intelligence, volume 2, number 2, pages 117-128, April 2018.<br \/>\n38. Y. Tsao, H.-C. Chu, S.-H. Fang, J. Lee, and C.-M. Lin, &#8220;Adaptive Noise Cancellation using Deep Cerebellar Model Articulation Controller,&#8221; IEEE Access, volume 6, pages 37395-37402, April 2018.<br \/>\n39. T.-H. Lin, T. Akamatsu, and Y. Tsao, &#8220;Comparison of passive acoustic soniferous fish monitoring with supervised and unsupervised approaches,&#8221; Journal of the Acoustical Society of America (JASA), volume 143, number 4, pages published online, April 2018.<br \/>\n40. S.-S. Wang, P. Lin, Y. Tsao, J.-W. Hung, and B. Su, &#8220;Suppression by Selecting Wavelets for Feature Compression in Distributed Speech Recognition,&#8221; IEEE Transactions on Audio, Speech and Language Processing, volume 26, number 3, pages 564-579, March 2018.<br \/>\n41. J. Torres-Sospedra et al., &#8220;Off-Line Evaluation of Mobile-Centric Indoor Positioning Systems: The Experiences from the 2017 IPIN Competition,&#8221; Sensors, volume 18, number 2, pages 487, February 2018.<br \/>\n42. H.-T. Hwang, Y.-C. Wu, Y.-H. Peng, C.-C. 
Hsu, Y. Tsao, H.-M. Wang, Y.-R. Wang, and S.-H. Chen, &#8220;Voice Conversion based on Locally Linear Embedding,&#8221; Journal of Information Science and Engineering, volume 34, number 6, pages 1493-1516, January 2018.<br \/>\n43. S.-W. Fu, P.-C. Li, Y.-H. Lai, C.-C. Yang, L.-C. Hsieh, and Y. Tsao, &#8220;Joint Dictionary Learning-based Non-Negative Matrix Factorization for Voice Conversion to Improve Speech Intelligibility After Oral Surgery,&#8221; IEEE Transactions on Biomedical Engineering, volume 64, number 11, pages 2584 &#8211; 2594, November 2017.<br \/>\n44. P. Lin, D. Lyu, F. Chen, S.-S. Wang, and Y. Tsao, &#8220;Multi-style Learning with Denoising Autoencoders for Acoustic Modeling in the Internet of Things (IoT),&#8221; Computer Speech and Language, volume 46, pages 481-495, November 2017.<br \/>\n45. T. Hussain, S. M. Siniscalchi, C.-C. Lee, S.-S. Wang, Y. Tsao and W.-H. Liao, &#8220;Experimental Study on Extreme Learning Machine Applications for Speech Enhancement,&#8221; IEEE Access, volume 99, number 99, pages 1-1, October 2017.<br \/>\n46. S.-W. Hsiao, H.-C. Sun, M.-C. Hsieh, M.-H. Tsai, Y. Tsao, and C.-C. Lee, &#8220;Toward Automating Oral Presentation Scoring during Principal Certification Program using Audio-Video Low-level Behavior Profiles,&#8221; IEEE Transactions on Affective Computing, volume PP, number PP, pages PP, September 2017.<br \/>\n47. S.-H. Fang, Y.-X. Fei, Z. Xu, and Y. Tsao, &#8220;Learning Transportation Modes from Smartphone Sensors Based on Deep Neural Network,&#8221; IEEE Sensors Journal, volume 17, pages 6111 &#8211; 6118, September 2017.<br \/>\n48. F. Chen, D. Zheng, Y. Tsao, &#8220;Effects of Noise Suppression and Envelope Dynamic Range Compression on the Intelligibility of Vocoded Sentences for a Tonal Language,&#8221; Journal of the Acoustical Society of America (JASA), volume 142, number 3, pages 1157-1166, September 2017.<br \/>\n49. T.-H. Lin, S.-H. 
Fang, and Y, Tsao, &#8220;Improving Biodiversity Assessment via Unsupervised Separation of Biological Sounds from Long-duration Recordings,&#8221; Scientific Reports, volume 7, number 4547, pages 1, July 2017.<br \/>\n50. X. Lu, P. Shen, Y. Tsao, and H. Kawai, &#8220;Regularization of Neural Network Model with Distance Metric Learning for I-vector based Spoken Language Identification,&#8221; Computer Speech and Language, volume 44, pages 48-60, July 2017.<br \/>\n51. Y.-H. Lai, F. Chen, S.-S. Wang, X. Lu, Y. Tsao, and C.-H. Lee, &#8220;A Deep Denoising Autoencoder Approach to Improving the Intelligibility of Vocoded Speech in Cochlear Implant Simulation,&#8221; IEEE Transactions on Biomedical Engineering, volume 64, number 7, pages 1568 &#8211; 1578, July 2017.<br \/>\n52. A. Chern, Y.-H. Lai, Y.-p. Chang, Y. Tsao, R. Y. Chang, and H.-W. Chang, &#8220;A Smartphone-Based Multi-Functional Hearing Assistive System to Facilitate Speech Recognition in the Classroom,&#8221; IEEE Access, volume 5, pages 10339 &#8211; 10351, June 2017, This paper has been selected as a Featured Article (http:\/\/ieeeaccess.ieee.org\/special-sections\/featured-articles\/smartphone-based-multi-functional-hearing-assistive-system-facilitate-speech-recognition-classroom\/)<br \/>\n53. T.-E. Chen, S.-I Yang, L.-T. Ho, K.-H. Tsai, Y.-H. Chen, Y.-F. Chang, Y.-H. Lai, S.-S. Wang, Y. Tsao*, and C.-C. Wu, &#8220;S1 and S2 Heart Sound Recognition using Deep Neural Networks,&#8221; IEEE Transactions on Biomedical Engineering, volume 64, number 2, pages 372 &#8211; 380, February 2017.<br \/>\n54. H.-y. Lee, B.-H. Tseng, T.-H. Wen, and Y. Tsao, &#8220;Personalizing Recurrent Neural Network based Language Model by Social Network,&#8221; IEEE Transactions on Audio, Speech and Language Processing, volume 25, number 3, pages 519 &#8211; 530, December 2016.<br \/>\n55. S.-H. Fang, W.-H. Chang, Y. Tsao, H.-C. Shih, and C. 
Wang, &#8220;Channel State Reconstruction Using Multilevel Discrete Wavelet Transform for Improved Fingerprinting-Based Indoor Localization,&#8221; IEEE Sensors Journal, volume 16, number 21, pages 7784 &#8211; 7791, November 2016.<br \/>\n56. T. Guan, G.-x. Chu, Y. Tsao, F. Chen, &#8220;Assessing the Perceptual Contributions of Level-dependent Segments to Sentence Intelligibility,&#8221; Journal of the Acoustical Society of America (JASA), volume 140, number 5, pages 3745-3754, November 2016.<br \/>\n57. H.-L. S. Wang, I-C. Chen, C.-H. Chiang, Y.-H. Lai, and Y. Tsao, &#8220;Auditory Perception, Suprasegmental Speech Processing, and Vocabulary Development in Chinese Preschoolers,&#8221; Perceptual and Motor Skills, volume 123, number 2, pages 365-382, October 2016.<br \/>\n58. S.-H. Fang , H.-H. Liao , Y.-X. Fei , K.-H. Chen , J.- W. Huang , Y.-D. Lu and Y. Tsao, &#8220;Transportation Modes Classification Using Sensors on Smartphones,&#8221; Sensors, volume 19;16, number 8, pages 1324, August 2016.<br \/>\n59. S.-S. Wang, A. Chern, Y. Tsao, J.-w. Hung, X. Lu, Y.-H. Lai, B. Su, &#8220;Wavelet Speech Enhancement based on Nonnegative Matrix Factorization,&#8221; IEEE Signal Processing Letters, volume 23, number 8, pages 1101-1105, August 2016.<br \/>\n60. P. Lin, S.-W. Fu, S.-S.Wang, Y.-H. Lai, and Y. Tsao, &#8220;Maximum Entropy Learning with Deep Belief Networks,&#8221; Entropy, volume 18, number 7, pages 251, July 2016.<br \/>\n61. F. Chen, Y. Tsao, and Y.-H. Lai, &#8220;Modeling Speech Intelligibility with Recovered Envelope from Temporal Fine Structure Stimulus,&#8221; Speech Communication, volume 81, pages 120\u2013128, July 2016.<br \/>\n62. Y. Tsao and Y.-H. Lai, &#8220;Generalized Maximum a Posteriori Spectral Amplitude Estimation for Speech Enhancement,&#8221; Speech Communication, volume 76, pages 112\u2013126, February 2016.<br \/>\n63. S.-H. Fang, C.-H. Wang, and Y. 
Tsao, &#8220;Compensating for Orientation Mismatch in Robust WiFi Localization Using Histogram Equalization,&#8221; IEEE Transactions on Vehicular Technology, volume 64, number 11, pages 5210-5220, November 2015.<br \/>\n64. C.-C. Hsu, K.-M. Cheong, T.-S. Chi, and Y. Tsao, &#8220;Robust Voice Activity Detection Algorithm Based on Feature of Frequency Modulation of Harmonics and Its DSP Implementation,&#8221; IEICE Transactions on Information and Systems, volume E98-D, number 10, pages 1808-1817, October 2015.<br \/>\n65. Y.-C. Lin, Y.-H. Lai, H.-W. Chang, Y. Tsao, Y.-p. Chang, and R. Y. Chang,, &#8220;A Smartphone-Based Remote Microphone Hearing Assistive System Using Wireless Technologies,&#8221; IEEE Systems Journal, volume PP, pages 1-10, October 2015, Smarthear Demo: https:\/\/www.youtube.com\/watch?v=e9HqIj09QJs<br \/>\n66. Y.-J, Lee, Y.-R. Chien, and Y. Tsao, &#8220;Rapid Converging M-max Partial Update Least Mean Square Algorithms with New Variable Step-size Methods,&#8221; IEICE Transaction on Communications, volume Vol.E98-A, number No.12, pages 2650-2657, August 2015.<br \/>\n67. Y.-H. Lai, Y. Tsao, F. Chen, &#8220;Effects of Adaptation Rate and Noise Suppression on the Intelligibility of Compressed-Envelope Based Speech,&#8221; PLoS ONE, volume 10.1371, pages journal.pone.0133519, July 2015.<br \/>\n68. Y. Tsao, P. Lin, T.-y. Hu, and X. Lu, &#8220;Ensemble Environment Modeling using Affine Transform Group,&#8221; Speech Communication, volume 68, pages 55\u201368, April 2015.<br \/>\n69. Y. Tsao, S.-H. Fang, and Y. Hsiao, &#8220;Acoustic Echo Cancellation Using a Vector-Space-Based Adaptive Filtering Algorithm,&#8221; IEEE Signal Processing Letters, volume 22, pages 351-355, March 2015.<br \/>\n70. Y. Tsao, T.-y. Hu, S. Sakti, S. Nakamura, and L.-s. Lee, &#8220;Variable Selection Linear Regression for Robust Speech Recognition,&#8221; IEICE Transactions on Information and Systems, volume E97-D, number 6, pages 1477-1487, June 2014.<br \/>\n71. Y. Tsao, X. 
Lu, P. Dixon, T.-y. Hu, S. Matsuda, and C. Hori, &#8220;Incorporating Local Information of the Acoustic Environments to MAP-based Feature Compensation and Acoustic Model Adaptation,&#8221; Computer Speech and Language, volume 28, number 3, pages 709-726, May 2014.<br \/>\n72. Y. Tsao, S. Matsuda, C. Hori, H. Kashioka, and C.-H. Lee, &#8220;A MAP-based Online Estimation Approach to Ensemble Speaker and Speaking Environment,&#8221; IEEE Transactions on Audio, Speech and Language Processing, volume 22, number 2, pages 403-416, February 2014.<br \/>\n73. Y.-H. Lai, Y. Tsao, and F. Chen, &#8220;A Study of Adaptive WDRC in Hearing Aids under Noisy Conditions,,&#8221; International Journal of Speech &amp; Language Pathology and Audiology, volume 1, number 2, pages 43-51, December 2013, (invited paper)<br \/>\n74. Y. Tsao and C.-H. Lee, &#8220;An Ensemble Speaker and Speaking Environment Modeling Approach to Robust Speech Recognition,&#8221; IEEE Transactions on Audio, Speech and Language Processing, volume 17, pages 1025 &#8211; 1037, June 2009.<br \/>\n75. Y. Tsao, S.-M. Lee, and L.-S. Lee, &#8220;Segmental Eigenvoice with Delicate Eigenspace for Improved Speaker Adaptation,&#8221; IEEE Transactions on Speech and Audio Processing, volume 13, pages 399 &#8211; 411, April 2005.<\/p>\n<p>Conference Papers<\/p>\n<p>1. Y.-W. Chen, K.-H. Hung, S.-Y. Chuang, J. Sherman, X. Lu, Y. Tsao, &#8220;A Study of Incorporating Articulatory Movement Information in Speech Enhancement,&#8221; to appear in EUSIPCO 2021,.<br \/>\n2. G.-X. Lin, S.-W. Hu, Y.-J. Lu, Y. Tsao, and C.-S. Lu, &#8220;QISTA-Net-Audio: Audio Super-resolution via Non-Convex Lq-normMinimization,&#8221; to appear in Interspeech 2021,.<br \/>\n3. R. E Zezario, C.-S. Fuh, H.-M. Wang, Y. Tsao, &#8220;Speech Enhancement with Zero-Shot Model Selection,&#8221; to appear in EUSIPCO 2021,.<br \/>\n4. W.-C. Huang, K. Kobayashi, Y.-H. Peng, C.-F. Liu, Y. Tsao, H.-M. Wang, T. 
Toda, &#8220;A Preliminary Study of a Two-Stage Paradigm for Preserving Speaker Identity in Dysarthric Voice Conversion,&#8221; to appear in Interspeech 2021,.<br \/>\n5. T.-A. Hsieh, C. Yu, S.-W. Fu, X. Lu, and Y. Tsao, &#8220;Improving Perceptual Quality by Phone-Fortified Perceptual Loss using Wasserstein Distance for Speech Enhancement,&#8221; to appear in Interspeech 2021,.<br \/>\n6. S.-W. Fu, C. Yu, T.-A. Hsieh, P. Plantinga, M. Ravanelli, X. Lu, Y. Tsao, &#8220;MetricGAN +: An Improved Version of MetricGAN for Speech Enhancement,&#8221; to appear in Interspeech 2021,.<br \/>\n7. Y.-C. Wu, C.-H. Hu, H.-S. Lee, Y.-H. Peng, W.-C. Huang, Y. Tsao, H.-M. Wang and T. Toda, &#8220;Relational Data Selection for Data Augmentation of Speaker-dependent Multi-band MelGAN Vocoder,&#8221; to appear in Interspeech 2021,.<br \/>\n8. X. Lu, P. Shen, Y. Tsao, H. Kawai, &#8220;Unsupervised neural adaptation model based on optimal transport for spoken language identification,&#8221; ICASSP 2021, June 2021.<br \/>\n9. Y.-K. Wu, K.-P. Huang, Y. Tsao, H.-y. Lee, &#8220;One shot learning for speech separation,&#8221; ICASSP 2021, June 2021.<br \/>\n10. C.-J. Peng, Y.-J. Chan, C. Yu, S.-S. Wang, Y. Tsao, T.-S. Chi, &#8220;Attention-based multi-task learning for speech-enhancement and speaker-identification in multi-speaker dialogue scenario,&#8221; ISCAS 2021, May 2021.<br \/>\n11. Y.-W. Chen, K.-H. Hung, S.-Y. Chuang, J. Sherman, W.-C. Huang, X. Lu, Y. Tsao, &#8220;EMA2S: An End-to-End Multimodal Articulatory-to-Speech System,&#8221; ISCAS 2021, May 2021.<br \/>\n12. Y.-T. Chang, Y.-H. Yang, Y.-H. Peng, S.-S. Wang, T.-S. Chi, Y. Tsao and H.-M. Wang, &#8220;MoEVC: A Mixture of Experts Voice Conversion System With Sparse Gating Mechanism for Online Computation Acceleration,&#8221; ISCSLP 2021, January 2021.<br \/>\n13. R. E. Zezario, S.-W. Fu, C.-S. Fuh, Y. Tsao, and H.-M. 
Wang, &#8220;STOI-Net: A Deep Learning based Non-Intrusive Speech Intelligibility Assessment Model,&#8221; APSIPA 2020, December 2020.<br \/>\n14. S.-W. Fu et al., &#8220;Boosting Objective Scores of Speech Enhancement Model through MetricGAN Post-Processing,&#8221; APSIPA 2020, December 2020.<br \/>\n15. Y.-J. Lu, C.-F. Liao, X. Lu, J.-w. Hung, Y. Tsao, &#8220;Incorporating Broad Phonetic Information for Speech Enhancement,&#8221; Interspeech 2020, October 2020.<br \/>\n16. C.-Y. Chen, W.-Z. Zheng, S.-S. Wang, Y. Tsao, P.-C. Li and Y.-H. Lai, &#8220;Enhancing Intelligibility of Dysarthric Speech Using Gated Convolutional-based Voice Conversion System,&#8221; Interspeech 2020, October 2020.<br \/>\n17. C.-C. Lee, Y.-C. Lin, H.-T. Lin, H.-M. Wang, Y. Tsao, &#8220;SERIL: Noise Adaptive Speech Enhancement using Regularization-based Incremental Learning,&#8221; Interspeech 2020, October 2020.<br \/>\n18. S.-Y. Chuang, Y. Tsao, C.-C. Lo, H.-M. Wang, &#8220;Lite Audio-Visual Speech Enhancement,&#8221; Interspeech 2020, October 2020.<br \/>\n19. H. Li, S.-W. Fu, Y. Tsao, J. Yamagishi, &#8220;iMetricGAN: Intelligibility Enhancement for Speech-in-Noise using Generative Adversarial Network-based Metric Learning,&#8221; Interspeech 2020, October 2020.<br \/>\n20. R. E. Zezario, T. Hussain, X. Lu, H.-M. Wang, and Y. Tsao, &#8220;Self-supervised Denoising Autoencoder with Linear Regression Decoder for Speech Enhancement,&#8221; ICASSP 2020, May 2020.<br \/>\n21. W.-C. Lin, Y. Tsao, F. Chen, and H.-M. Wang, &#8220;Investigation of Neural Network Approaches for Unified Spectral and Prosodic Feature Enhancement,&#8221; APSIPA 2019, pages 1179-1184, November 2019.<br \/>\n22. T. Hussain, Y. Tsao, H.-M. Wang, J.-C. Wang, S. M. Siniscalchi, and W.-H. Liao, &#8220;Compressed Multimodal Hierarchical Extreme Learning Machine for Speech Enhancement,&#8221; APSIPA 2019, November 2019.<br \/>\n23. F. Ye, Y. Tsao, and F. 
Chen, &#8220;Subjective Feedback-based Neural Network Pruning for Speech Enhancement,&#8221; APSIPA 2019, November 2019.<br \/>\n24. K.-Y. Liu, S.-S. Wang, Y. Tsao, J.-w. Hung, &#8220;Speech Enhancement Based on the Integration of Fully Convolutional Network, Temporal Lowpass Filtering and Spectrogram Masking,&#8221; ROCLING 2019, October 2019.<br \/>\n25. W.-C. Huang , Y.-C. Wu, H.-T. Hwang , P. L. Tobing, T. Hayashiy, K. Kobayashi, T. Toda, Y. Tsao , H.-M. Wang, &#8220;Refined WaveNet Vocoder for Variational Autoencoder Based Voice Conversion,&#8221; EUSIPCO 2019, September 2019.<br \/>\n26. C.-F. Liao, Y. Tsao, H.-y. Lee and H.-M. Wang, &#8220;Noise Adaptive Speech Enhancement using Domain Adversarial Training,&#8221; Interspeech 2019, September 2019, (with ISCA Travel Grant)<br \/>\n27. F.-K. Chuang, S.-S. Wang, J.-w. Hung, Y. Tsao, and S.-H. Fang, &#8220;Speaker-aware Deep Denoising Autoencoder with Embedded Speaker Identity for Speech Enhancement,&#8221; Interspeech 2019, September 2019.<br \/>\n28. Y.-C. Lin, Y.-T. Hsu, S.-W. Fu, Y. Tsao, and T.-W. Kuo, &#8220;IA-NET: Acceleration and Compression of Speech Enhancement using Integer-adder Deep Neural Network,&#8221; Interspeech 2019, September 2019.<br \/>\n29. W.-C. Huang et al.,, &#8220;Generalization of Spectrum Differential based Direct Waveform Modification for Voice Conversion,&#8221; ISCA SSW 10, September 2019.<br \/>\n30. C.-C. Lo, S.-w. Fu, W. C. Huang, X. Wang, J. Yamagishi, Y. Tsao and H.-M. Wang, &#8220;MOSNet: Deep Learning based Objective Assessment for Voice Conversion,&#8221; Interspeech 2019, September 2019.<br \/>\n31. L.-W. Chen, H.-Y. Lee, and Y. Tsao, &#8220;Generative Adversarial Networks for Unpaired Voice Transformation on Impaired Speech,&#8221; Interspeech 2019, September 2019.<br \/>\n32. X. Lu, P. Shen, S. Li, Y. Tsao, and H. Kawai, &#8220;Class-wise Centroid Distance Metric Learning for Acoustic Event Detection,&#8221; Interspeech 2019, September 2019.<br \/>\n33. C.-F. Liao, Y. 
Tsao, X. Lu and H. Kawai, &#8220;Incorporating Symbolic Sequential Modeling for Speech Enhancement,&#8221; Interspeech 2019, September 2019, (with ISCA Travel Grant)<br \/>\n34. R. E. Zezario, S.-W. Fu, X. Lu, H.-M. Wang, and Y. Tsao, &#8220;Specialized Speech Enhancement Model Selection Based on Learned Non-Intrusive Quality Assessment Metric,&#8221; Interspeech 2019, September 2019.<br \/>\n35. P.-T. Huang, H.-S. Lee, S.-S. Wang, K.-Y. Chen, Y. Tsao and H.-M. Wang, &#8220;Exploring the Encoder Layers of Discriminative Autoencoders for LVCSR,&#8221; Interspeech 2019, September 2019, (with ISCA Travel Grant)<br \/>\n36. T. Hussain, Y. Tsao, H.-M. Wang, J.-C. Wang, S. M. Siniscalchi, W.-H. Liao, &#8220;Audio-Visual Speech Enhancement Using Hierarchical Extreme Learning Machine,&#8221; EUSIPCO 2019, September 2019.<br \/>\n37. W.-C. Huang, Y.-C. Wu, C.-C. Lo, P. L. Tobing, T. Hayashi, K. Kobayashi, T. Toda, Y. Tsao and H.-M. Wang, &#8220;Investigation of F0 conditioning and Fully Convolutional Networks in Variational Autoencoder based Voice Conversion,&#8221; Interspeech 2019, September 2019, (with ISCA Travel Grant)<br \/>\n38. S.-W. Fu, C.-F. Liao, Y. Tsao, S.-D. Lin, &#8220;MetricGAN: Generative Adversarial Networks based Black-box Metric Scores Optimization for Speech Enhancement,&#8221; ICML 2018, June 2019, Long Oral with ICML Travel Grant; Codes: https:\/\/github.com\/JasonSWFu\/MetricGAN<br \/>\n39. Y.-L. Shen, C.-Y. Huang, S.-S. Wang, Y. Tsao, H.-M. Wang, and T.-S. Chi, &#8220;Reinforcement Learning Based Speech Enhancement for Robust Speech Recognition,&#8221; ICASSP 2019, May 2019.<\/p>\n<p>40. T. Hussain, Y. Tsao, S. M. Sinicalchi, J.-C. Wang, H.-M. Wang, and W.-H. Liao, &#8220;Bone-conducted Speech Enhancement using Hierarchical Extreme Learning Machine,&#8221; IWSDS 2019, April 2019.<br \/>\n41. K.-Y. Liu, S.-k. Lee, S.-S. Wang, Y. Tsao, J.-w. 
Hung, &#8220;Reducing noise and reverberation in speech signals via the integration of denoising autoencoder and temporal lowpass filtering,&#8221; ICASI 2019, April 2019.<br \/>\n42. Shang-Chih Lin*, Yu Tsao, Shun-Feng Su, Yennun Huang, and Zi-Qing Zhong, &#8220;An Abnormal Detection Strategy of Rotating Electric Machine based on Frequency Distribution,&#8221; The 39th Symposium on Electrical Power Engineering, December 2018.<br \/>\n43. R. E. Zezario, J.-W. Huang, X. Lu, Y. Tsao, H.-T. Hwang, H.-M. Wang, &#8220;Deep Denoising Autoencoder Based Post Filtering for Speech Enhancement,&#8221; APSIPA 2018, December 2018.<\/p>\n<p>44. S.-k. Lee, S.-S. Wang, Y. Tsao, J.-w. Hung, &#8220;Speech Enhancement based on Reducing the Detail Portion of Speech Spectrograms in Modulation Domain via Discrete Wavelet Transform,&#8221; ISCSLP 2018, November 2018.<br \/>\n45. Y.-T. Hsu, Y.-C. Lin, S.-W. Fu, Y. Tsao, T.-W. Kuo, &#8220;A study on speech enhancement using exponent-only floating point quantized neural network (EOFP-QNN),&#8221; SLT 2018, November 2018.<br \/>\n46. Shang-Chih Lin*, Yu Tsao, Shun-Feng Su, and Yennun Huang, &#8220;An Industrial IoT Analysis System Based on Machining Data of Metal Materials,&#8221; International Conference on Fuzzy Theory and Its Applications, November 2018.<br \/>\n47. Hung-Chung Li, Shang-Chih Lin, Yu Tsao, Shun-Feng Su, Pei-Li Sun and Yennun Huang, &#8220;A Supervised Learning Algorithm Considering Light Conditions for Visual Inspection of Metal Objects,&#8221; The 54th Annual Conference of Chinese Society for Quality 2018 International Symposium of Quality Management, November 2018, (Makalot Industry-Academic Collaboration Award) (\u7372\u63a8\u85a6\u8f49\u6295EI\u671f\u520a, \u64f4\u5145\u7814\u7a76\u4fee\u6539\u4e2d)<br \/>\n48. Y.-T. Hsu, Z. Zhu, C.-T. Wang, S.-H. Fang, F. Rudzicz, and Y. 
Tsao, &#8220;Robustness against the channel effect in pathological voice detection,&#8221; NeurIPS 2018, Machine Learning for Health (ML4H) Workshop, November 2018.<br \/>\n49. Shang-Chih Lin*, Chuan-Hsiang Su, Yu Tsao, Shun-Feng Su, Hong-Yuan Mark Liao, and Yennun Huang, &#8220;FIS-based Domestic Milling Machine PHM System Considering Multi-speed Frequency Variation,&#8221; IEEE International Conference on Advanced Manufacturing, November 2018, (Best Paper Award) (\u7372\u63a8\u85a6\u8f49\u6295SCI\u671f\u520a, \u64f4\u5145\u7814\u7a76\u4fee\u6539\u4e2d)<br \/>\n50. W.-C. Huang, H.-T. Hwang, Y.-H. Peng, Y. Tsao, H.-M. Wang, &#8220;Voice Conversion Based on Cross-Domain Features Using Variational Auto Encoders,&#8221; ISCSLP 2018, November 2018, Best Student Paper Award<br \/>\n51. X. Lu, P. Shen, S. Li, Y. Tsao, H. Kawai, &#8220;Temporal Attentive Pooling for Acoustic Event Detection,&#8221; Interspeech 2018, September 2018.<br \/>\n52. B.-S. Yu, Y. Tsao, S.-W. Yang, Y.-K. Chen, and S.Y. Chien, &#8220;Architecture Design of Convolutional Neural Networks for Face Detection on an FPGA Platform,&#8221; IEEE SiPS 2018, September 2018.<br \/>\n53. Y.-H. Peng, H.-T. Hwang, Y.-C. Wu, Y. Tsao, H.-M. Wang, &#8220;Exemplar-Based Spectral Detail Compensation for Voice Conversion,&#8221; Interspeech 2018, September 2018.<br \/>\n54. Y.-Y. Kao, H.-P. Hsu, C.-F. Liao, Y. Tsao, H.-C. Yang, J.-L. Li, C.-C. Lee, H.-S. Lee, and H.-M. Wang, &#8220;Automatic Detection of Speech Under Cold Using Discriminative Autoencoders and Strength Modeling with Multiple Sub-Dictionary Generation,&#8221; IEEE IWAENC, September 2018.<br \/>\n55. S.-W. Fu, Y. Tsao, H.-T. Hwang, H.-M. Wang, &#8220;Quality-Net: An End-to-End Non-intrusive Speech Quality Assessment Model based on BLSTM,&#8221; Interspeech 2018, September 2018.<br \/>\n56. L. Sun, J. Du, T. Gao, Y.-D. Lu, Y. Tsao, C.-H. Lee, N. 
Ryant, &#8220;A Novel LSTM-based Speech Preprocessor For Speaker Diarization in Realistic Mismatch Conditions,&#8221; ICASSP, April 2018.<br \/>\n57. N. Ryant et al., &#8220;Enhancement and Analysis of Conversational Speech: JSALT 2017,&#8221; ICASSP, April 2018.<br \/>\n58. W.-J. Lee, S.-S. Wang, F. Chen, X. Lu, S.-Y. Chien, and Y. Tsao,, &#8220;Speech Dereverberation Based on Integrated Deep and Ensemble Learning Algorithm,&#8221; ICASSP, April 2018.<br \/>\n59. Y.-H. Lai, W.-Z. Zheng, S.-T. Tang, S.-H. Fang, W.-H. Liao, and Y. Tsao, &#8220;Improving the Performance of Hearing Aids in Noisy Environments based on Deep Learning Technology,&#8221; EMBC 2018, April 2018.<br \/>\n60. S.-S. Wang, Y. Tsao, H.-L. S. Wang, Y.-H. Lai*, and L. P.-H. Li, &#8220;A Deep Learning based Noise Reduction Approach to Improve Speech Intelligibility for Cochlear Implant Recipients in the Presence of Competing Speech Noise,&#8221; APSIPA 2017, November 2017.<br \/>\n61. Y.-H. Peng, C.-C. Hsu, Y.-C. Wu, H.-T. Hwang, Y.-W. Liu, Y. Tsao, and H.-M. Wang, &#8220;Fast Locally Linear Embedding Algorithm for Exemplar-based Voice Conversion,&#8221; APSIPA 2017, November 2017, (Poster Presentation Award)<br \/>\n62. S.-W. Fu, Y. Tsao, X. Lu, and H. Kawai, &#8220;Raw Waveform-based Speech Enhancement by Fully Convolutional Networks,&#8221; APSIPA 2017, November 2017.<br \/>\n63. T.-H. Lin, Y.-H. Wang, S.-S. Lu, H.-W. Yen, and Y. Tsao, &#8220;Computing Biodiversity Change via a Soundscape Monitoring Network,&#8221; PNC 2017 Annual Conference and Joint Meetings, November 2017.<br \/>\n64. T.-H. Lin and Y. Tsao, &#8220;Deblending of Simultaneous-source Seismic Data via Periodicity-coded Nonnegative Matrix Factorization,&#8221; IEEE Dataport, September 2017.<br \/>\n65. S.-W. Fu, T.-y. Hu, Y. Tsao, X. Lu, &#8220;Complex Spectrogram Enhancement by Convolutional Neural Network with Multi-metrics Learning,&#8221; IEEE MLSP 2017, September 2017.<br \/>\n66. Y.-C. Wu, H.-T. Hwang, S.-S. Wang, C.-C. 
Hsu, Y. Tsao, and H.-M. Wang, &#8220;A Post-filtering Approach Based on Locally Linear Embedding Difference Compensation for Speech Enhancement,&#8221; Interspeech2017, August 2017.<br \/>\n67. C.-C. Hsu, H.-T. Hwang, Y.-C. Wu, Y. Tsao, and H.-M. Wang, &#8220;Voice Conversion from Unaligned Corpora Using Variational Autoencoding Wasserstein Generative Adversarial Networks,&#8221; Interspeech2017, August 2017.<br \/>\n68. M.-H. Yang, H.-S. Lee, Y.-D. Lu, K.-Y. Chen, Y. Tsao, B. Chen, and H.-M. Wang, &#8220;Discriminative Autoencoders for Acoustic Modeling,&#8221; Interspeech2017, August 2017.<br \/>\n69. C.-L. Wu, H.-P. Hsu, S.-S. Wang, J.-W. Hung, Y.-H. Lai, H.-M. Wang, and Y. Tsao, &#8220;Wavelet Speech Enhancement Based on Robust Principal Component Analysis,&#8221; Interspeech2017, August 2017.<br \/>\n70. S.-T. Lin, Y.-H. Liao, Y. Tsao, and S.-Y. Chien, &#8220;Object-based on-line video summarization for internet of video things,&#8221; IEEE ISCAS, May 2017.<br \/>\n71. Y.-C. Wu, H.-T. Hwang, S.-S. Wang, C.-C. Hsu, Y.-H. Lai, Y. Tsao, and H.-M. Wang, &#8220;A Locally Linear Embedding Based Postfiltering Approach for Speech Enhancement,&#8221; IEEE ICASSP, March 2017.<br \/>\n72. H.-S. Lee, Y.-D. Lu, C.-C. Hsu, Y. Tsao, H.-M. Wang, and S.-K. Jeng, &#8220;Discriminative Autoencoders for Speaker Verification,&#8221; IEEE ICASSP, March 2017.<br \/>\n73. C.-C. Hsu, H.-T. Hwang, Y.-C. Wu, Y. Tsao and H.-M. Wang, &#8220;Voice Conversion from Non-parallel Corpora Using Variational Auto-encoder,&#8221; APSIPA ASC, December 2016.<br \/>\n74. J.-C. Hou, S.-S. Wang, Y.-H. Lai, J.-C. Lin, Y. Tsao, H.-W. Chang, and H.-M. Wang, &#8220;Audio-Visual Speech Enhancement using Deep Neural Networks,&#8221; APSIPA 2016, December 2016.<br \/>\n75. C.-C. Hsu, H.-T. Hwang, Y.-C. Wu, Y. Tsao, and H.-M. Wang, &#8220;Dictionary Update for NMF-based Voice Conversion Using an Encoder-Decoder Network,&#8221; ISCSLP, November 2016.<br \/>\n76. Y.-H. Lai, S.-S. Wang, Y.-T. Su, H.-C. Cheng, F. 
K. Fu, and Y. Tsao, &#8220;Improving the Performance of Speech Perception in Noisy Environment based on a FAME Strategy,&#8221; ISCSLP 2016, October 2016.<br \/>\n77. C.-Y. Hsu, R. E. Zezario, J.-C. Wang, X. Lu, and Y. Tsao, &#8220;Incorporating Local Environment Information with Ensemble Neural Networks to Robust Automatic Speech Recognition,&#8221; ISCSLP 2016, October 2016.<br \/>\n78. Y.-Y. Hsieh, C.-D. Wu, Y. Tsao, and S.-S. Lu, &#8220;A Linear Regression Model with Dynamic Pulse Transit Time Features for Noninvasive Blood Pressure Prediction,&#8221; BioCAS, October 2016.<br \/>\n79. X. Lu, P. Shen, Y. Tsao, H. Kawai, &#8220;Pair-wise Distance Metric Learning of Neural Network Model for Spoken Language Identification,&#8221; Interspeech, September 2016.<br \/>\n80. H.-S. Lee, Y. Tsao, C.-C. Lee, H.-M. Wang, W.-C. Lin, W.-C. Chen, S.-W. Hsiao, S.-K. Jeng, &#8220;Minimization of Regression and Ranking Losses with Shallow Neural Networks on Automatic Sincerity Evaluation,&#8221; Interspeech, September 2016.<br \/>\n81. Y.-C. Wu, H.-T. Hwang, C.-C. Hsu, Y. Tsao, H.-M. Wang, &#8220;Locally Linear Embedding for Exemplar-Based Spectral Conversion,&#8221; Interspeech, September 2016.<br \/>\n82. S.-W. Fu, Y. Tsao, X. Lu, &#8220;SNR-Aware Convolutional Neural Network Modeling for Speech Enhancement,&#8221; Interspeech, September 2016.<br \/>\n83. Y.-H. Lai, C.-H. Wang, S.-Y. Hou, B.-Y. Chen, Y. Tsao, and Y.-W. Liu, &#8220;DCASE Report for Task 3: Sound Event Detection in Real Life Audio,&#8221; DCASE 2016 workshop, September 2016.<\/p>\n<p>84. C.-W. Wu, M.-T. Zhong, Y. Tsao, S.-W. Yang, Y.-K. Chen, and S.-Y. Chien, &#8220;Track-clustering Error Evaluation for Track-based Multi-camera Tracking System Employing Human Re-identification,&#8221; CVPR workshop, August 2016, Codes: https:\/\/github.com\/cw1204772\/ClustTMCT<br \/>\n85. Y.-T. Liu, Y. Tsao, R. Y. 
Chang:, &#8220;Nonnegative Matrix Factorization-based Frequency Lowering Technology for Mandarin-speaking Hearing Aid Users,&#8221; IEEE ICASSP2016, pages 5905-5909, May 2016.<br \/>\n86. Syu-Siang Wang, Jeremy Chiaming Yang, Yu Tsao, and Jeih-weih Hung, &#8220;Leveraging Nonnegative Matrix Factorization in Processing the Temporal Modulation Spectrum for Speech Enhancement,&#8221; IEEE ICCE-Taiwan 2016, May 2016.<br \/>\n87. Jeremy Chiaming Yang, Syu-Siang Wang, Yu Tsao, and Jeih-weih Hung, &#8220;Speech Enhancement via Ensemble Modeling NMF Adaptation,&#8221; IEEE ICCE-Taiwan 2016, May 2016.<br \/>\n88. S.-S. Wang and Y. Tsao, &#8220;Temporal Modulation Spectral Restoration for Robust Speech Recognition,&#8221; IEEE International Conference on Multimedia Big Data, April 2016.<br \/>\n89. Ying-Hui Lai, Chien-Hsun Chen, Shih-Tsang Tang, Zong-Mu Yeh, and Yu Tsao, &#8220;Improving the Performance of Noise Reduction in Hearing Aids Based on the Genetic Algorithm,&#8221; IFMBE Proceedings 57, March 2016.<br \/>\n90. H.-T. Hwang, Y. Tsao, H.-M. Wang, Y.-R. Wang, and S.-H. Chen, &#8220;A Probabilistic Interpretation for Artificial Neural Network-based Voice Conversion,&#8221; APSIPA 2015, December 2015.<br \/>\n91. Y.-T. Liu, R. Y. Chang, Y. Tsao, and Y.-p. Chang, &#8220;A New Frequency Lowering Technique for Mandarin-Speaking Hearing Aid Users,&#8221; GlobalSIP 2015, December 2015.<br \/>\n92. S.-S. Wang, H.-T. Hwang, Y.-H. Lai, Y. Tsao, X. Lu, H.-M. Wang, and B. Su, &#8220;Improving Denoising Auto-encoder Based Speech Enhancement With the Speech Parameter Generation Algorithm,&#8221; APSIPA 2015, December 2015.<br \/>\n93. P. Lin, D.-C. Lyu, Y.-F. Chang, and Y. Tsao, &#8220;Temporal Alignment for Deep Neural Networks,&#8221; GlobalSIP 2015, December 2015.<br \/>\n94. X. Lu, P. Shen, Y. Tsao, C. Hori, H. 
Kawai, &#8220;Sparse Representation with Temporal Max-Smoothing for Acoustic Event Detection,&#8221; Interspeech 2015, ISCA, editor, pages 1176-1180, September 2015.<br \/>\n95. P. Lin, D.-C. Lyu, Y.-F. Chang, and Y. Tsao, &#8220;Speech Recognition with Temporal Neural Networks,&#8221; Interspeech 2015, ISCA, editor, pages 21\u201325, September 2015.<br \/>\n96. P. Lin, S.-S. Wang, and Y. Tsao, &#8220;Temporal Information in Tone Recognition,&#8221; IEEE ICCE 2015, June 2015.<br \/>\n97. W.-C. Chen, P.-T. Lai, Y. Tsao, and C.-C. Lee, &#8220;Multimodal Arousal Rating using Unsupervised Fusion Technique,&#8221; ICASSP 2015, April 2015.<br \/>\n98. Y.-H. Lai, S.-S. Wang, P.-C. Li, and Yu Tsao, &#8220;A Discriminative Post-filter for Speech Enhancement in Hearing Aids,&#8221; ICASSP 2015, April 2015.<br \/>\n99. H. Jing, A.-C. Liang, S.-D. Lin, and Y. Tsao, &#8220;A Transfer Probabilistic Collective Factorization Model to Handle Sparse Data in Collaborative Filtering,&#8221; ICDM 2014, December 2014, accepted as a regular paper (acceptance rate=9.5%)<br \/>\n100. Y.-H. Lai, F. Chen, and Y. Tsao, &#8220;Effect of Adaptive Envelope Compression in Simulated Electric Hearing in Reverberation,&#8221; ISIC 2014, December 2014.<br \/>\n101. Y.-F. Chang, P. Lin, S.-H. Cheng, K.-H. Chan, Y.-C. Zeng, C.-W. Liao, W.-T. Chang, Y.-C. Wang, Y. Tsao, &#8220;Robust Anchorperson Detection Based on Audio Streams using a Hybrid I-vector and DNN System,&#8221; APSIPA 2014, December 2014.<br \/>\n102. X. Lu, Y. Tsao, P. Shen, and C. Hori, &#8220;Spectral Patch Based Sparse Coding for Acoustic Event Detection,&#8221; ISCSLP 2014, September 2014.<br \/>\n103. X. Lu, Y. Tsao, S. Matsuda, and C. Hori, &#8220;Ensemble Modeling of Denoising Autoencoder for Speech Spectrum Restoration,&#8221; Interspeech 2014, September 2014.<br \/>\n104. Y. H. Lai, F. Chen, and Y. 
Tsao, &#8220;An Adaptive Envelope Compression Strategy for Speech Processing in Cochlear Implants,&#8221; Interspeech 2014, September 2014.<br \/>\n105. H.-S. Lee, Y. Tsao, H.-M. Wang and S.-K. Jen, &#8220;Clustering-Based I-Vector Formulation for Speaker Recognition,&#8221; Interspeech 2014, September 2014.<br \/>\n106. S.-S. Wang, P. Lin, D.-C. Lyu, Y. Tsao, H.-T. Hwang, B. Su and H.-M. Wang, &#8220;Acoustic Feature Conversion using a Polynomial based Feature Transferring Algorithm,&#8221; ISCSLP 2014, September 2014.<br \/>\n107. P. Lin, F. Chen, S.-S. Wang, Y. Tsao and Y. H. Lai, &#8220;Automatic Speech Recognition with Primarily Temporal Envelope Information,&#8221; Interspeech 2014, September 2014.<br \/>\n108. H. Jing, T.-Y. Hu, H.-S. Lee, W.-C. Chen, C.-C. Lee, Y. Tsao and H.-M. Wang, &#8220;Ensemble of Machine Learning Algorithms for Cognitive and Physical Speaker Load Detection,&#8221; Interspeech 2014, September 2014.<br \/>\n109. H.-S. Lee, Y. Tsao, Y.-F. Chang, H.-M. Wang, and S.-K. Jeng, &#8220;Speaker Verification Using Kernel-Based Binary Classifiers with Binary Operation Derived Features,&#8221; ICASSP 2014, May 2014.<br \/>\n110. H.-t. Fan, J.-w. Hung, X. Lu, S.-S. Wang, Yu Tsao, &#8220;Speech Enhancement using Segmental Nonnegative Matrix Factorization,&#8221; ICASSP 2014, May 2014.<br \/>\n111. X. Lu, Yu Tsao, S. Matsuda, and C. Hori, &#8220;Sparse Representation Based on a Bag of Spectral Exemplars for Acoustic Event Detection,&#8221; ICASSP 2014, May 2014.<br \/>\n112. H. Jing, Y. Tsao, K.-Y. Chen and H.-M. Wang, &#8220;Semantic Na\u00efve Bayes Classifier for Document Classification,&#8221; IJCNLP, December 2013.<br \/>\n113. C.-H. Wang, T.-W. Kao, S.-H. Fang, Y. Tsao, L.-C. Kuo, S.-W. Kao, and N.-C. Lin, &#8220;Robust Wi-Fi Location Fingerprinting Against Device Diversity based on Spatial Mean Normalization,&#8221; APSIPA 2013, October 2013.<br \/>\n114. H.-T. Hwang, Y. Tsao, H.-M. Wang, Y.-R. Wang, S.-H. 
Chen, &#8220;Incorporating Global Variance in the Training Phase of GMM-based Voice Conversion,&#8221; APSIPA 2013, October 2013.<br \/>\n115. Bo Li, Yu Tsao and Khe Chai Sim, &#8220;An Investigation of Spectral Restoration Algorithms for Deep Neural Networks based Noise Robust Speech Recognition,&#8221; Interspeech 2013, August 2013.<br \/>\n116. Tsung-Hsien Wen, Aaron Heidel, Hung-yi Lee, Yu Tsao and Lin-Shan Lee, &#8220;Recurrent Neural Network Based Language Model Personalization by Social Network Crowdsourcing,&#8221; Interspeech 2013, August 2013, (Best Student Paper Award Nomination)<br \/>\n117. Hung-yi Lee, Ting-yao Hu, How Jing, Yun-Fan Chang, Yu Tsao, Yu-Cheng Kao and Tsang-Long Pao, &#8220;Ensemble of Machine Learning and Acoustic Segment Model Techniques for Speech Emotion and Autism Spectrum Disorders Recognition,&#8221; Interspeech 2013, August 2013, (Second Place In the Autism Sub-Challenge)<br \/>\n118. Xugang Lu, Yu Tsao, Shigeki Matsuda and Chiori Hori, &#8220;Speech Enhancement Based on Deep Denoising Autoencoder,&#8221; Interspeech 2013, August 2013, Codes: Tensor Flow: https:\/\/github.com\/jonlu0602\/DeepDenoisingAutoencoder; Keras: https:\/\/github.com\/jerrygood0703\/DDAE; Matlab: https:\/\/drive.google.com\/open?id=0B8ZEsMh6ITIlNVZ1VmROdTdQNUU<br \/>\n119. Hsin-Te Hwang, Yu Tsao, Hsin-Min Wang, Yih-Ru Wang and Sin-Horng Chen, &#8220;Alleviating the Over-Smoothing Problem in GMM-Based Voice Conversion with Discriminative Training,&#8221; Interspeech 2013, August 2013.<br \/>\n120. Ying-Hui Lai, Yu-Cheng Su, Yu Tsao, Shuenn-Tsong Young, &#8220;Evaluation of Generalized Maximum a Posteriori Spectral Amplitude (GMAPA) Speech Enhancement Algorithm in Hearing Aids,&#8221; ISCE 2013, June 2013.<br \/>\n121. Syu-Siang Wang, Yu Tsao, Jeih-weih Hung, &#8220;Filtering on the Temporal Probability Sequence in Histogram Equalization for Robust Speech Recognition,&#8221; ICASSP 2013, IEEE, May 2013.<br \/>\n122. 
Yu-Cheng Su, Yu Tsao, Jung-En Wu, Fu-Rong Jean, &#8220;Speech Enhancement using Generalized Maximum a Posteriori Spectral Amplitude Estimator,&#8221; ICASSP 2013, IEEE, May 2013.<br \/>\n123. How Jing and Yu Tsao, &#8220;Sparse Maximum Entropy Deep Belief Nets,&#8221; IJCNN 2013, IEEE, April 2013.<br \/>\n124. S.-S. Wang, J.-W. Hung, and Yu Tsao, &#8220;A Study on Cepstral Subband Normalization for Robust ASR,&#8221; ISCSLP 2012, IEEE, December 2012.<br \/>\n125. X. Lu, Yu Tsao, S. Matsuda, C. Hori, and H. Kashioka, &#8220;Acoustic Space Partition based on Broad Phonetic Class for Ensemble Acoustic Modeling,&#8221; ISCSLP 2012, IEEE, December 2012.<br \/>\n126. H.-T. Hwang, Yu Tsao, H.-M. Wang, Y.-R. Wang, and S.-H. Chen, &#8220;Exploring Mutual Information for GMM-Based Spectral Conversion,&#8221; ISCSLP 2012, IEEE, December 2012.<br \/>\n127. H.-T. Hwang, Yu Tsao, H.-M. Wang, Y.-R. Wang, and S.-H. Chen, &#8220;A Study of Mutual Information for GMM-Based Spectral Conversion,&#8221; Interspeech 2012, ISCA, September 2012.<\/p>\n<p>128. T.-Y. Hu, Yu Tsao, and L.-S. Lee, &#8220;Discriminative Fuzzy Clustering Maximum a Posterior Linear Regression for Speaker Adaptation,&#8221; Interspeech 2012, ISCA, September 2012.<br \/>\n129. Yu Tsao, C.-L. Huang, S. Matsuda, C. Hori, and H. Kashioka, &#8220;A Linear Projection Approach to Environment Modeling for Robust Speech Recognition,&#8221; ICASSP 2012, IEEE, April 2012.<br \/>\n130. C.-L. Huang, Yu Tsao, and C. Hori, &#8220;Feature Normalization and Selection for Robust Speaker State Recognition,&#8221; COCOSDA 2011, IEEE, October 2011.<br \/>\n131. Yu Tsao, P. R. Dixon, C. Hori, and H. Kawai, &#8220;Incorporating Regional Information to Enhance MAP-based Stochastic Feature Compensation for Robust Speech Recognition,&#8221; Interspeech, ISCA, August 2011.<br \/>\n132. Yu Tsao, R. Isotani, H. Kawai, and S. 
Nakamura, &#8220;Increasing Discriminative Capability on Map-based Mapping Function Estimation for Acoustic Model Adaptation,&#8221; ICASSP, IEEE, May 2011.<br \/>\n133. Y. Tsao, S. Matsuda, S. Sakai, R. Isotani, H. Kawai, and S. Nakamura, &#8220;A Sampling-based Environment Population Projection Approach for Rapid Acoustic Model Adaptation,&#8221; ICASSP, IEEE, May 2011.<br \/>\n134. J. Li, Y. Tsao, and C.-H. Lee, &#8220;Shrinkage Model Adaptation in Automatic Speech Recognition,&#8221; Interspeech, ISCA, September 2010.<br \/>\n135. A. Mushtaq, Y. Tsao, and C.-H. Lee, &#8220;A Particle Filter Feature Compensation Approach to Robust Speech Recognition,&#8221; Interspeech, ISCA, September 2010.<br \/>\n136. Yu Tsao, H. Sun, H. Li, and C.-H. Lee, &#8220;An Acoustic Segment Model Approach to Incorporating Temporal Information into Speaker Modeling for Text-Independent Speaker Recognition,&#8221; ICASSP, IEEE, May 2010.<br \/>\n137. Y. Tsao, S. Matsuda, S. Nakamura, and C.-H. Lee, &#8220;MAP Estimation of Online Mapping Parameters in Ensemble Speaker and Speaking Environment Modeling,&#8221; ASRU, IEEE, December 2009.<br \/>\n138. Y. Tsao, J. Li, C.-H. Lee, and S. Nakamura, &#8220;Soft Margin Estimation on Improving Environment Structures for Ensemble Speaker and Speaking Environment Modeling,&#8221; IUCS, ACM, December 2009.<br \/>\n139. S. Matsuda, Y. Tsao, J. Li, S. Nakamura, and C.-H. Lee, &#8220;A Study on Soft Margin Estimation of Linear Regression Parameters for Speaker Adaptation,&#8221; Interspeech, ISCA, December 2009.<br \/>\n140. Y. Tsao, J. Li, and C.-H. Lee, &#8220;Ensemble Speaker and Speaking Environment Modeling Approach with Advanced Online Estimation Process,&#8221; ICASSP, IEEE, May 2009.<br \/>\n141. S.-Y. Peng, Y. Tsao, P. E. Hasler, and D. V. Anderson, &#8220;A Programmable Analog Radial-Basis-Function Based Classifier,&#8221; ICASSP, IEEE, December 2008.<br \/>\n142. Y. Tsao and C.-H. 
Lee, &#8220;Improving the Ensemble Speaker and Speaking Environment Modeling Approach by Enhancing the Precision of the Online Estimation Process,&#8221; Interspeech, ISCA, September 2008.<br \/>\n143. Y. Tsao and C.-H. Lee, &#8220;Two Extensions to Ensemble Speaker and Speaking Environment Modeling for Robust Automatic Speech Recognition,&#8221; ASRU, IEEE, December 2007.<br \/>\n144. I. Bromberg, Q. Fu, J. Hou, J. Li, C. Ma, B. Mattews, A. Moreno-Daniel, J. Morris, S. M. Siniscalchi, Y. Tsao, and Y. Wang, &#8220;Detection-based ASR In the Automatic Speech Attribute Transcription Project,&#8221; Interspeech, ISCA, September 2007.<br \/>\n145. Y. Tsao and C.-H. Lee, &#8220;An Ensemble Modeling Approach to Joint Characterization of Speaker and Speaking Environments,&#8221; Interspeech, ISCA, September 2007.<br \/>\n146. C. Ma, Y. Tsao, and C.-H. Lee, &#8220;A Study on Detection Based Automatic Speech Recognition,&#8221; Interspeech, ISCA, September 2006.<br \/>\n147. Y. Tsao and C.-H. Lee, &#8220;A Vector Space Approach to Environment Modeling for Robust Speech Recognition,&#8221; Interspeech, ISCA, September 2006.<br \/>\n148. Y. Tsao, J. Li, and C.-H. Lee, &#8220;A Study on Separation between Acoustic Models and Its Applications,&#8221; Eurospeech, ISCA, September 2005.<br \/>\n149. J. Li, Y. Tsao, and C.-H. Lee, &#8220;A Study on Knowledge Source Integration for Candidate Rescoring in Automatic Speech Recognition,&#8221; ICASSP, IEEE, April 2005.<br \/>\n150. Y. Tsao, S.-M. Lee, and L.-S. Lee, &#8220;Segmental Eigenvoice for Rapid Speaker Adaptation,&#8221; Eurospeech, ISCA, September 2001.<\/p>\n<p>Technical Reports<\/p>\n<p>1. 
\u738b\u8c6b\u714c\u3001\u6797\u8aa0\u8b19\u3001\u56b4\u6f22\u5049\u3001\u6797\u5b50\u7693\u3001\u9678\u8072\u5c71\u3001\u66f9\u6631\u3001\u7aef\u6728\u8302\u752f\u3001\u9ec3\u4fca\u5609\u3001\u838a\u5ead\u745e, &#8220;\u4e9e\u6d32\u8072\u666f\u9577\u671f\u76e3\u6e2c\u7db2,&#8221; number 3, \u81fa\u7063\u751f\u614b\u5b78\u6703\u3001\u4e2d\u592e\u7814\u7a76\u9662\u3001\u65e5\u672c\u570b\u7acb\u7814\u7a76\u958b\u767c\u6cd5\u4eba\u6d77\u6d0b\u7814\u7a76\u958b\u767c\u6a5f\u69cb\u3001\u6797\u696d\u8a66\u9a57\u6240\u68ee\u6797\u4fdd\u8b77\u7d44, August 2019.<br \/>\n2. \u66f9\u6631, &#8220;\u57fa\u65bc\u4eba\u5de5\u667a\u6167\u4e4b\u8a9e\u97f3\u6e9d\u901a\u8f14\u5177,&#8221; \u4e2d\u7814\u9662 | \u6578\u7406\u79d1\u5b78, \u6f2b\u6b65\u79d1\u7814, \u79d1\u666e\u5c08\u6b04 2019-06-20, 2019.<br \/>\n3. \u5f35\u4f51\u6995\u3001\u66f9\u6631, &#8220;\u7814\u4e4b\u6709\u7269(\u667a\u6167\u807d),&#8221; \u4e2d\u592e\u7814\u7a76\u9662, 2019.<\/p>\n<p>4. \u7aef\u6728\u8302\u752f, &#8220;\u7814\u4e4b\u6709\u7269(\u8759\u8760\u7684\u8d85\u97f3\u6ce2\uff0c\u85cf\u4e86\u4ec0\u9ebc\u8a0a\u606f\uff1f),&#8221; \u4e2d\u592e\u7814\u7a76\u9662, 2018.<\/p>\n<p>Book &amp; Book Chapters<\/p>\n<p>1. P. Lin, Y. Tsao, and L.-W. Kuo,, chapter &#8220;Controlling the Biocompatibility and Mechanical Effects of Implantable Microelectrodes to Improve Chronic Neural Recordings in the Auditory Nervous System,&#8221; &#8220;An Excursus into Hearing Loss,&#8221; S. Hatzopoulos and A. Ciorba, editor, pages 173-195, IntechOpen, May 2018.<br \/>\n2. Y.-H. Lai, Fe. Chen, and Y. Tsao,, chapter &#8220;Adaptive Dynamic Range Compression for Improving Envelope-Based Speech Perception: Implications for Cochlear Implants,&#8221; &#8220;Emerging Technology and Architecture for Big-data Analytics,&#8221; A. Chattopadhyay and Y. Hao, editor, pages 191-214, Springer, April 2017.<\/p>\n<p>Others<\/p>\n<p>1. 
Yu Tsao, &#8220;\u57fa\u65bc\u6df1\u5ea6\u5b78\u7fd2\u4e4b\u8a9e\u97f3\u589e\u5f37\u6280\u8853\u53ca\u5176\u61c9\u7528,&#8221;, 2020\u5927\u6578\u64da\u4eba\u5de5\u667a\u80fd.<br \/>\n2. Berrak Sisman, Yu Tsao, Haizhou Li, &#8220;Theory and Practice of<br \/>\nVoice Conversion,&#8221;, Tutorial in APSIPA 2020 December 2020.<\/p>\n<p>3. Fei Chen and Yu Tsao, &#8220;Intelligibility Evaluation and Speech Enhancement based on Deep Learning,&#8221;, Tutorial in Interspeech 2020 October 2020, Video: https:\/\/www.youtube.com\/watch?v=89S4CgfPWG0<br \/>\n4. Yu Tsao, &#8220;Speech Enhancement based on Deep Learning<br \/>\nand Intelligibility Evaluation,&#8221;, Tutorial in APSIPA 2019 November 2019.<br \/>\n5. H.-Y. Lee and Y. Tsao, &#8220;Generative Adversarial Network and its Applications to Speech Signal Processing and Natural Language Processing,&#8221;, Tutorial in Interspeech 2019 September 2019.<\/p>\n<p>6. &#8220;Improving biodiversity monitoring through soundscape information retrieval,&#8221; May 2018.<br \/>\n7. Hung-iy Lee and Yu Tsao, &#8220;Generative Adversarial Network and its Applications to Speech Signal Processing and Natural Language Processing,&#8221;, Tutorial in ICASSP 2018 April 2018.<\/p>\n<p>8. Y.-C. Lin, Y.-H. Lai, H.-W. Chang, Y. Tsao, Y.-p. Chang, and R. Y. Chang, &#8220;PAD-MMRT,&#8221; August 2014, Original corpus is prepared by K.-S. Tsai, L.-H. Tseng, C.-J.Wu, and S.-T. Young: \u201cDevelopment of a Mandarin monosyllable recognition test,\u201d Ear and Hearing, vol. 30, no. 1, pp. 90\u201399, 2009.<br \/>\n9. 
\u66f9\u6631\uff0c\u8607\u715c\u7a0b\uff0c\u738b\u7dd2\u7fd4, &#8220;\u7dda\u6027\u6620\u5c04\u8f49\u63db\u51fd\u6578\u65bc\u8072\u5b78\u6a21\u578b\u8abf\u9069\u4e4b\u5f37\u5065\u5f0f\u8a9e\u97f3\u8fa8\u8b58,&#8221;, \u8a08\u7b97\u8a9e\u8a00\u5b78\u5b78\u6703\u901a\u8a0a \u7b2c 23 \u5377\u7b2c 2 \u671f (2012 \u5e74 6 \u6708 ) June 2012.<\/p>\n<\/div><\/section><\/p><\/div><\/div><\/div><\/div><!-- close content main div --><\/div><\/div><div id='after_section_3'  class='main_color av_default_container_wrap container_wrap sidebar_right'  ><div class='container av-section-cont-open' ><div class='template-page content  av-content-small alpha units'><div class='post-entry post-entry-type-page post-entry-8910'><div class='entry-content-wrapper clearfix'>\n","protected":false},"excerpt":{"rendered":"<p>Joint Appointment Professor (Academia Sinica)<\/p>\n","protected":false},"featured_media":8112,"comment_status":"closed","ping_status":"closed","template":"","tags":[],"portfolio_entries":[120,121,122],"class_list":["post-8910","portfolio","type-portfolio","status-publish","has-post-thumbnail","hentry","portfolio_entries-full-time-faculty","portfolio_entries-communications-control-systems-group","portfolio_entries-machine-learning-group"],"yoast_head":"<!-- This site is optimized with the Yoast SEO plugin v23.4 - https:\/\/yoast.com\/wordpress\/plugins\/seo\/ -->\n<title>\u66f9\u6631 Yu Tsao - \u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb<\/title>\n<meta name=\"robots\" content=\"index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1\" \/>\n<link rel=\"canonical\" href=\"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/\u66f9\u6631-yu-tsao\/\" \/>\n<meta property=\"og:locale\" content=\"en_US\" \/>\n<meta property=\"og:type\" content=\"article\" \/>\n<meta property=\"og:title\" content=\"\u66f9\u6631 Yu Tsao - \u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb\" \/>\n<meta property=\"og:description\" content=\"Joint Appointment 
Professor (Academia Sinica)\" \/>\n<meta property=\"og:url\" content=\"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/\u66f9\u6631-yu-tsao\/\" \/>\n<meta property=\"og:site_name\" content=\"\u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb\" \/>\n<meta property=\"article:publisher\" content=\"https:\/\/www.facebook.com\/groups\/1169041213961392\/\" \/>\n<meta property=\"article:modified_time\" content=\"2023-07-25T03:36:32+00:00\" \/>\n<meta property=\"og:image\" content=\"https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/12\/\u66f9\u6631\u8001\u5e2b.png\" \/>\n\t<meta property=\"og:image:width\" content=\"140\" \/>\n\t<meta property=\"og:image:height\" content=\"210\" \/>\n\t<meta property=\"og:image:type\" content=\"image\/png\" \/>\n<meta name=\"twitter:card\" content=\"summary_large_image\" \/>\n<meta name=\"twitter:label1\" content=\"Est. reading time\" \/>\n\t<meta name=\"twitter:data1\" content=\"63 minutes\" \/>\n<script type=\"application\/ld+json\" class=\"yoast-schema-graph\">{\"@context\":\"https:\/\/schema.org\",\"@graph\":[{\"@type\":\"WebPage\",\"@id\":\"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/\",\"url\":\"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/\",\"name\":\"\u66f9\u6631 Yu Tsao - 
\u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb\",\"isPartOf\":{\"@id\":\"https:\/\/eeweb.cycu.edu.tw\/#website\"},\"primaryImageOfPage\":{\"@id\":\"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/#primaryimage\"},\"image\":{\"@id\":\"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/#primaryimage\"},\"thumbnailUrl\":\"https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/12\/\u66f9\u6631\u8001\u5e2b.png\",\"datePublished\":\"2022-12-08T06:58:46+00:00\",\"dateModified\":\"2023-07-25T03:36:32+00:00\",\"inLanguage\":\"en-US\",\"potentialAction\":[{\"@type\":\"ReadAction\",\"target\":[\"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/\"]}]},{\"@type\":\"ImageObject\",\"inLanguage\":\"en-US\",\"@id\":\"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/#primaryimage\",\"url\":\"https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/12\/\u66f9\u6631\u8001\u5e2b.png\",\"contentUrl\":\"https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/12\/\u66f9\u6631\u8001\u5e2b.png\",\"width\":140,\"height\":210},{\"@type\":\"WebSite\",\"@id\":\"https:\/\/eeweb.cycu.edu.tw\/#website\",\"url\":\"https:\/\/eeweb.cycu.edu.tw\/\",\"name\":\"\u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb\",\"description\":\"\u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb\",\"alternateName\":\"\u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u7cfb\",\"potentialAction\":[{\"@type\":\"SearchAction\",\"target\":{\"@type\":\"EntryPoint\",\"urlTemplate\":\"https:\/\/eeweb.cycu.edu.tw\/?s={search_term_string}\"},\"query-input\":{\"@type\":\"PropertyValueSpecification\",\"valueRequired\":true,\"valueName\":\"search_term_string\"}}],\"inLanguage\":\"en-US\"}]}<\/script>\n<!-- \/ Yoast SEO plugin. 
-->","yoast_head_json":{"title":"\u66f9\u6631 Yu Tsao - \u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb","robots":{"index":"index","follow":"follow","max-snippet":"max-snippet:-1","max-image-preview":"max-image-preview:large","max-video-preview":"max-video-preview:-1"},"canonical":"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/\u66f9\u6631-yu-tsao\/","og_locale":"en_US","og_type":"article","og_title":"\u66f9\u6631 Yu Tsao - \u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb","og_description":"Joint Appointment Professor (Academia Sinica)","og_url":"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/\u66f9\u6631-yu-tsao\/","og_site_name":"\u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb","article_publisher":"https:\/\/www.facebook.com\/groups\/1169041213961392\/","article_modified_time":"2023-07-25T03:36:32+00:00","og_image":[{"width":140,"height":210,"url":"https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/12\/\u66f9\u6631\u8001\u5e2b.png","type":"image\/png"}],"twitter_card":"summary_large_image","twitter_misc":{"Est. 
reading time":"63 minutes"},"schema":{"@context":"https:\/\/schema.org","@graph":[{"@type":"WebPage","@id":"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/","url":"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/","name":"\u66f9\u6631 Yu Tsao - \u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb","isPartOf":{"@id":"https:\/\/eeweb.cycu.edu.tw\/#website"},"primaryImageOfPage":{"@id":"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/#primaryimage"},"image":{"@id":"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/#primaryimage"},"thumbnailUrl":"https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/12\/\u66f9\u6631\u8001\u5e2b.png","datePublished":"2022-12-08T06:58:46+00:00","dateModified":"2023-07-25T03:36:32+00:00","inLanguage":"en-US","potentialAction":[{"@type":"ReadAction","target":["https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/"]}]},{"@type":"ImageObject","inLanguage":"en-US","@id":"https:\/\/eeweb.cycu.edu.tw\/portfolio-item\/%e6%9b%b9%e6%98%b1-yu-tsao\/#primaryimage","url":"https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/12\/\u66f9\u6631\u8001\u5e2b.png","contentUrl":"https:\/\/eeweb.cycu.edu.tw\/wp-content\/uploads\/2022\/12\/\u66f9\u6631\u8001\u5e2b.png","width":140,"height":210},{"@type":"WebSite","@id":"https:\/\/eeweb.cycu.edu.tw\/#website","url":"https:\/\/eeweb.cycu.edu.tw\/","name":"\u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb","description":"\u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u5de5\u7a0b\u5b78\u7cfb","alternateName":"\u4e2d\u539f\u5927\u5b78\u96fb\u6a5f\u7cfb","potentialAction":[{"@type":"SearchAction","target":{"@type":"EntryPoint","urlTemplate":"https:\/\/eeweb.cycu.edu.tw\/?s={search_term_string}"},"query-input":{"@type":"PropertyValueSpecification","valueRequired":true,"valueName":"search_term_string"}}],"inLanguage":"en-US"}]}},"_links":{"self":[{"href":"https:\/\/eeweb.cycu.edu.tw\/en\/
wp-json\/wp\/v2\/portfolio\/8910"}],"collection":[{"href":"https:\/\/eeweb.cycu.edu.tw\/en\/wp-json\/wp\/v2\/portfolio"}],"about":[{"href":"https:\/\/eeweb.cycu.edu.tw\/en\/wp-json\/wp\/v2\/types\/portfolio"}],"replies":[{"embeddable":true,"href":"https:\/\/eeweb.cycu.edu.tw\/en\/wp-json\/wp\/v2\/comments?post=8910"}],"version-history":[{"count":9,"href":"https:\/\/eeweb.cycu.edu.tw\/en\/wp-json\/wp\/v2\/portfolio\/8910\/revisions"}],"predecessor-version":[{"id":14033,"href":"https:\/\/eeweb.cycu.edu.tw\/en\/wp-json\/wp\/v2\/portfolio\/8910\/revisions\/14033"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/eeweb.cycu.edu.tw\/en\/wp-json\/wp\/v2\/media\/8112"}],"wp:attachment":[{"href":"https:\/\/eeweb.cycu.edu.tw\/en\/wp-json\/wp\/v2\/media?parent=8910"}],"wp:term":[{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/eeweb.cycu.edu.tw\/en\/wp-json\/wp\/v2\/tags?post=8910"},{"taxonomy":"portfolio_entries","embeddable":true,"href":"https:\/\/eeweb.cycu.edu.tw\/en\/wp-json\/wp\/v2\/portfolio_entries?post=8910"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}